xref: /f-stack/dpdk/drivers/vdpa/ifc/base/ifcvf.c (revision 2d9fd380)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include "ifcvf.h"
#include "ifcvf_osdep.h"

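/*
 * Translate a vendor-specific PCI capability into a pointer within the
 * mapped BAR. Reject BAR indexes beyond IFCVF_PCI_MAX_RESOURCE as well as
 * offset/length windows that wrap around or run past the BAR length.
 */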
STATIC void *
get_cap_addr(struct ifcvf_hw *hw, struct ifcvf_pci_cap *cap)
{
	u8 bar = cap->bar;
	u32 length = cap->length;
	u32 offset = cap->offset;

	if (bar > IFCVF_PCI_MAX_RESOURCE - 1) {
		DEBUGOUT("invalid bar: %u\n", bar);
		return NULL;
	}

	if (offset + length < offset) {
		DEBUGOUT("offset(%u) + length(%u) overflows\n",
			offset, length);
		return NULL;
	}

	if (offset + length > hw->mem_resource[bar].len) {
		DEBUGOUT("offset(%u) + length(%u) overflows bar length(%u)\n",
			offset, length, (u32)hw->mem_resource[bar].len);
		return NULL;
	}

	return hw->mem_resource[bar].addr + offset;
}

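/*
 * Walk the PCI capability list and map the virtio modern configuration
 * structures advertised through vendor-specific capabilities: common config,
 * notify region (whose notify_off_multiplier immediately follows the
 * capability header), ISR and device config. The live-migration (LM)
 * registers are expected in BAR4.
 */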
int
ifcvf_init_hw(struct ifcvf_hw *hw, PCI_DEV *dev)
{
	int ret;
	u8 pos;
	struct ifcvf_pci_cap cap;

	ret = PCI_READ_CONFIG_BYTE(dev, &pos, PCI_CAPABILITY_LIST);
	if (ret < 0) {
		DEBUGOUT("failed to read pci capability list\n");
		return -1;
	}

	while (pos) {
		ret = PCI_READ_CONFIG_RANGE(dev, (u32 *)&cap,
				sizeof(cap), pos);
		if (ret < 0) {
			DEBUGOUT("failed to read cap at pos: %x\n", pos);
			break;
		}

		if (cap.cap_vndr != PCI_CAP_ID_VNDR)
			goto next;

		DEBUGOUT("cfg type: %u, bar: %u, offset: %u, "
				"len: %u\n", cap.cfg_type, cap.bar,
				cap.offset, cap.length);

		switch (cap.cfg_type) {
		case IFCVF_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cap_addr(hw, &cap);
			break;
		case IFCVF_PCI_CAP_NOTIFY_CFG:
			PCI_READ_CONFIG_DWORD(dev, &hw->notify_off_multiplier,
					pos + sizeof(cap));
			hw->notify_base = get_cap_addr(hw, &cap);
			hw->notify_region = cap.bar;
			break;
		case IFCVF_PCI_CAP_ISR_CFG:
			hw->isr = get_cap_addr(hw, &cap);
			break;
		case IFCVF_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cap_addr(hw, &cap);
			break;
		}
next:
		pos = cap.cap_next;
	}

	hw->lm_cfg = hw->mem_resource[4].addr;

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
			hw->isr == NULL || hw->dev_cfg == NULL) {
		DEBUGOUT("capability incomplete\n");
		return -1;
	}

	DEBUGOUT("capability mapping:\ncommon cfg: %p\n"
			"notify base: %p\nisr cfg: %p\ndevice cfg: %p\n"
			"multiplier: %u\n",
			hw->common_cfg, hw->notify_base,
			hw->isr, hw->dev_cfg,
			hw->notify_off_multiplier);

	return 0;
}

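/* Read/write the 8-bit device_status register in the common config space. */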
STATIC u8
ifcvf_get_status(struct ifcvf_hw *hw)
{
	return IFCVF_READ_REG8(&hw->common_cfg->device_status);
}

STATIC void
ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
{
	IFCVF_WRITE_REG8(status, &hw->common_cfg->device_status);
}

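/*
 * Writing 0 to device_status resets the device; poll until it reads back
 * zero so the reset is known to have completed before reprogramming.
 */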
STATIC void
ifcvf_reset(struct ifcvf_hw *hw)
{
	ifcvf_set_status(hw, 0);

	/* flush status write */
	while (ifcvf_get_status(hw))
		msec_delay(1);
}

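/*
 * OR a status bit into device_status so earlier handshake bits are kept;
 * the trailing read flushes the posted write.
 */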
STATIC void
ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
{
	if (status != 0)
		status |= ifcvf_get_status(hw);

	ifcvf_set_status(hw, status);
	ifcvf_get_status(hw);
}

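/*
 * The 64-bit feature word is exposed as two 32-bit halves chosen through
 * the feature_select registers (0 = low half, 1 = high half); the same
 * scheme applies to the guest (driver) features written below.
 */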
u64
ifcvf_get_features(struct ifcvf_hw *hw)
{
	u32 features_lo, features_hi;
	struct ifcvf_pci_common_cfg *cfg = hw->common_cfg;

	IFCVF_WRITE_REG32(0, &cfg->device_feature_select);
	features_lo = IFCVF_READ_REG32(&cfg->device_feature);

	IFCVF_WRITE_REG32(1, &cfg->device_feature_select);
	features_hi = IFCVF_READ_REG32(&cfg->device_feature);

	return ((u64)features_hi << 32) | features_lo;
}

STATIC void
ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
{
	struct ifcvf_pci_common_cfg *cfg = hw->common_cfg;

	IFCVF_WRITE_REG32(0, &cfg->guest_feature_select);
	IFCVF_WRITE_REG32(features & ((1ULL << 32) - 1), &cfg->guest_feature);

	IFCVF_WRITE_REG32(1, &cfg->guest_feature_select);
	IFCVF_WRITE_REG32(features >> 32, &cfg->guest_feature);
}

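/*
 * Negotiate features: clamp the requested set to what the device offers,
 * program it, then set FEATURES_OK and read device_status back to confirm
 * the device accepted the selection.
 */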
STATIC int
ifcvf_config_features(struct ifcvf_hw *hw)
{
	u64 host_features;

	host_features = ifcvf_get_features(hw);
	hw->req_features &= host_features;

	ifcvf_set_features(hw, hw->req_features);
	ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_FEATURES_OK);

	if (!(ifcvf_get_status(hw) & IFCVF_CONFIG_STATUS_FEATURES_OK)) {
		DEBUGOUT("failed to set FEATURES_OK status\n");
		return -1;
	}

	return 0;
}

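/* Split a 64-bit value across two 32-bit register writes, low half first. */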
STATIC void
io_write64_twopart(u64 val, u32 *lo, u32 *hi)
{
	IFCVF_WRITE_REG32(val & ((1ULL << 32) - 1), lo);
	IFCVF_WRITE_REG32(val >> 32, hi);
}

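/*
 * Program every virtqueue: bind MSI-X vector 0 to the device config space,
 * then for each queue select it, write the desc/avail/used ring addresses
 * and size, seed the LM ring-state word with the last avail/used indexes,
 * bind vector i + 1, derive the doorbell address from queue_notify_off *
 * notify_off_multiplier, and finally enable the queue.
 */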
STATIC int
ifcvf_hw_enable(struct ifcvf_hw *hw)
{
	struct ifcvf_pci_common_cfg *cfg;
	u8 *lm_cfg;
	u32 i;
	u16 notify_off;

	cfg = hw->common_cfg;
	lm_cfg = hw->lm_cfg;

	IFCVF_WRITE_REG16(0, &cfg->msix_config);
	if (IFCVF_READ_REG16(&cfg->msix_config) == IFCVF_MSI_NO_VECTOR) {
		DEBUGOUT("msix vec alloc failed for device config\n");
		return -1;
	}

	for (i = 0; i < hw->nr_vring; i++) {
		IFCVF_WRITE_REG16(i, &cfg->queue_select);
		io_write64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
				&cfg->queue_desc_hi);
		io_write64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
				&cfg->queue_avail_hi);
		io_write64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
				&cfg->queue_used_hi);
		IFCVF_WRITE_REG16(hw->vring[i].size, &cfg->queue_size);

		*(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
				(i / 2) * IFCVF_LM_CFG_SIZE + (i % 2) * 4) =
			(u32)hw->vring[i].last_avail_idx |
			((u32)hw->vring[i].last_used_idx << 16);

		IFCVF_WRITE_REG16(i + 1, &cfg->queue_msix_vector);
		if (IFCVF_READ_REG16(&cfg->queue_msix_vector) ==
				IFCVF_MSI_NO_VECTOR) {
			DEBUGOUT("queue %u, msix vec alloc failed\n",
					i);
			return -1;
		}

		notify_off = IFCVF_READ_REG16(&cfg->queue_notify_off);
		hw->notify_addr[i] = (void *)((u8 *)hw->notify_base +
				notify_off * hw->notify_off_multiplier);
		IFCVF_WRITE_REG16(1, &cfg->queue_enable);
	}

	return 0;
}

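/*
 * Quiesce every virtqueue and read the ring state back from the LM window.
 * Note that both last_avail_idx and last_used_idx are restored from the
 * upper 16 bits, presumably because the device has synced the avail index
 * to the used index once traffic is drained.
 */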
STATIC void
ifcvf_hw_disable(struct ifcvf_hw *hw)
{
	u32 i;
	struct ifcvf_pci_common_cfg *cfg;
	u32 ring_state;

	cfg = hw->common_cfg;

	IFCVF_WRITE_REG16(IFCVF_MSI_NO_VECTOR, &cfg->msix_config);
	for (i = 0; i < hw->nr_vring; i++) {
		IFCVF_WRITE_REG16(i, &cfg->queue_select);
		IFCVF_WRITE_REG16(0, &cfg->queue_enable);
		IFCVF_WRITE_REG16(IFCVF_MSI_NO_VECTOR, &cfg->queue_msix_vector);
		ring_state = *(u32 *)(hw->lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
				(i / 2) * IFCVF_LM_CFG_SIZE + (i % 2) * 4);
		hw->vring[i].last_avail_idx = (u16)(ring_state >> 16);
		hw->vring[i].last_used_idx = (u16)(ring_state >> 16);
	}
}

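/*
 * Bring the device up following the virtio driver initialization sequence:
 * reset, ACKNOWLEDGE, DRIVER, feature negotiation (FEATURES_OK), queue
 * setup, and finally DRIVER_OK. ifcvf_stop_hw below reverses this.
 */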
int
ifcvf_start_hw(struct ifcvf_hw *hw)
{
	ifcvf_reset(hw);
	ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_ACK);
	ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_DRIVER);

	if (ifcvf_config_features(hw) < 0)
		return -1;

	if (ifcvf_hw_enable(hw) < 0)
		return -1;

	ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_DRIVER_OK);
	return 0;
}

void
ifcvf_stop_hw(struct ifcvf_hw *hw)
{
	ifcvf_hw_disable(hw);
	ifcvf_reset(hw);
}

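/*
 * Program the dirty-page logging window used during live migration: the
 * base and end addresses of the log region (base and base + size) are
 * written as 32-bit halves, then logging is switched on for this VF.
 * ifcvf_disable_logging turns it back off.
 */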
void
ifcvf_enable_logging(struct ifcvf_hw *hw, u64 log_base, u64 log_size)
{
	u8 *lm_cfg;

	lm_cfg = hw->lm_cfg;

	*(u32 *)(lm_cfg + IFCVF_LM_BASE_ADDR_LOW) =
		log_base & IFCVF_32_BIT_MASK;

	*(u32 *)(lm_cfg + IFCVF_LM_BASE_ADDR_HIGH) =
		(log_base >> 32) & IFCVF_32_BIT_MASK;

	*(u32 *)(lm_cfg + IFCVF_LM_END_ADDR_LOW) =
		(log_base + log_size) & IFCVF_32_BIT_MASK;

	*(u32 *)(lm_cfg + IFCVF_LM_END_ADDR_HIGH) =
		((log_base + log_size) >> 32) & IFCVF_32_BIT_MASK;

	*(u32 *)(lm_cfg + IFCVF_LM_LOGGING_CTRL) = IFCVF_LM_ENABLE_VF;
}

void
ifcvf_disable_logging(struct ifcvf_hw *hw)
{
	u8 *lm_cfg;

	lm_cfg = hw->lm_cfg;
	*(u32 *)(lm_cfg + IFCVF_LM_LOGGING_CTRL) = IFCVF_LM_DISABLE;
}

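/* Kick a queue by writing its index to the queue's doorbell address. */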
void
ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
{
	IFCVF_WRITE_REG16(qid, hw->notify_addr[qid]);
}

u8
ifcvf_get_notify_region(struct ifcvf_hw *hw)
{
	return hw->notify_region;
}

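/*
 * Byte offset of a queue's doorbell within the notify BAR; callers can use
 * it, for example, to map the doorbell directly into a guest.
 */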
u64
ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid)
{
	return (u8 *)hw->notify_addr[qid] -
		(u8 *)hw->mem_resource[hw->notify_region].addr;
}