/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <rte_bus_pci.h>
#include <rte_memzone.h>

#include "ioat_private.h"
#include "ioat_spec.h"

#define IDXD_VENDOR_ID		0x8086
#define IDXD_DEVICE_ID_SPR	0x0B25

#define IDXD_PMD_RAWDEV_NAME_PCI rawdev_idxd_pci

const struct rte_pci_id pci_id_idxd_map[] = {
	{ RTE_PCI_DEVICE(IDXD_VENDOR_ID, IDXD_DEVICE_ID_SPR) },
	{ .vendor_id = 0, /* sentinel */ },
};

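/*
 * Submit a device or work-queue command by writing the CMD register and
 * polling CMDSTATUS until the hardware clears the active bit. Commands in
 * the idxd_disable_wq..idxd_reset_wq range operate on a bitmask of work
 * queues, so the queue id is converted to (1 << qid) for those. Access to
 * the CMD/CMDSTATUS pair is serialised with the per-device spinlock, and
 * the poll loop gives up after 1000 iterations rather than spinning
 * forever. Returns the error bits from CMDSTATUS, i.e. zero on success.
 */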
static inline int
idxd_pci_dev_command(struct idxd_rawdev *idxd, enum rte_idxd_cmds command)
{
	uint8_t err_code;
	uint16_t qid = idxd->qid;
	int i = 0;

	if (command >= idxd_disable_wq && command <= idxd_reset_wq)
		qid = (1 << qid);
	rte_spinlock_lock(&idxd->u.pci->lk);
	idxd->u.pci->regs->cmd = (command << IDXD_CMD_SHIFT) | qid;

	do {
		rte_pause();
		err_code = idxd->u.pci->regs->cmdstatus;
		if (++i >= 1000) {
			IOAT_PMD_ERR("Timeout waiting for command response from HW");
			rte_spinlock_unlock(&idxd->u.pci->lk);
			return err_code;
		}
	} while (idxd->u.pci->regs->cmdstatus & CMDSTATUS_ACTIVE_MASK);
	rte_spinlock_unlock(&idxd->u.pci->lk);

	return err_code & CMDSTATUS_ERR_MASK;
}

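/*
 * Return a pointer to the config registers for work queue "wq_idx". The
 * per-WQ config block is 32 bytes shifted left by the WQCFG size the
 * device reports (wq_cfg_sz), and the block is accessed as 32-bit words
 * using the WQ_*_IDX offsets.
 */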
static uint32_t *
idxd_get_wq_cfg(struct idxd_pci_common *pci, uint8_t wq_idx)
{
	return RTE_PTR_ADD(pci->wq_regs_base,
			(uintptr_t)wq_idx << (5 + pci->wq_cfg_sz));
}

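/* Read the WQ state field and report whether it is in the enabled (0x1) state */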
static int
idxd_is_wq_enabled(struct idxd_rawdev *idxd)
{
	uint32_t state = idxd_get_wq_cfg(idxd->u.pci, idxd->qid)[WQ_STATE_IDX];
	return ((state >> WQ_STATE_SHIFT) & WQ_STATE_MASK) == 0x1;
}

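/*
 * Stop callback: issue a disable-WQ command for this queue and verify via
 * the WQ state field that the queue actually went down.
 */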
static void
idxd_pci_dev_stop(struct rte_rawdev *dev)
{
	struct idxd_rawdev *idxd = dev->dev_private;
	uint8_t err_code;

	if (!idxd_is_wq_enabled(idxd)) {
		IOAT_PMD_ERR("Work queue %d already disabled", idxd->qid);
		return;
	}

	err_code = idxd_pci_dev_command(idxd, idxd_disable_wq);
	if (err_code || idxd_is_wq_enabled(idxd)) {
		IOAT_PMD_ERR("Failed disabling work queue %d, error code: %#x",
				idxd->qid, err_code);
		return;
	}
	IOAT_PMD_DEBUG("Work queue %d disabled OK", idxd->qid);
}

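/*
 * Start callback: refuse to enable a queue whose rings have not been set
 * up by dev_configure, then issue an enable-WQ command and confirm the
 * queue reports itself enabled.
 */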
static int
idxd_pci_dev_start(struct rte_rawdev *dev)
{
	struct idxd_rawdev *idxd = dev->dev_private;
	uint8_t err_code;

	if (idxd_is_wq_enabled(idxd)) {
		IOAT_PMD_WARN("WQ %d already enabled", idxd->qid);
		return 0;
	}

	if (idxd->public.batch_ring == NULL) {
		IOAT_PMD_ERR("WQ %d has not been fully configured", idxd->qid);
		return -EINVAL;
	}

	err_code = idxd_pci_dev_command(idxd, idxd_enable_wq);
	if (err_code || !idxd_is_wq_enabled(idxd)) {
		IOAT_PMD_ERR("Failed enabling work queue %d, error code: %#x",
				idxd->qid, err_code);
		return err_code == 0 ? -1 : err_code;
	}

	IOAT_PMD_DEBUG("Work queue %d enabled OK", idxd->qid);

	return 0;
}

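/*
 * Rawdev callbacks for the PCI-probed device: start/stop are PCI-specific,
 * the remaining handlers are shared with the rest of the ioat/idxd rawdev
 * code.
 */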
static const struct rte_rawdev_ops idxd_pci_ops = {
		.dev_close = idxd_rawdev_close,
		.dev_selftest = ioat_rawdev_test,
		.dump = idxd_dev_dump,
		.dev_configure = idxd_dev_configure,
		.dev_start = idxd_pci_dev_start,
		.dev_stop = idxd_pci_dev_stop,
		.dev_info_get = idxd_dev_info_get,
		.xstats_get = ioat_xstats_get,
		.xstats_get_names = ioat_xstats_get_names,
		.xstats_reset = ioat_xstats_reset,
};

/* each portal uses 4 x 4k pages */
#define IDXD_PORTAL_SIZE (4096 * 4)

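/*
 * One-time device initialisation: map the config and portal BARs, check
 * that the device is disabled and idle, wipe any stale group/WQ
 * configuration, spread engines and work queues round-robin across the
 * groups, size each WQ equally, and finally enable the device. On success
 * the number of work queues is returned so the probe routine can create
 * one rawdev per queue.
 */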
static int
init_pci_device(struct rte_pci_device *dev, struct idxd_rawdev *idxd)
{
	struct idxd_pci_common *pci;
	uint8_t nb_groups, nb_engines, nb_wqs;
	uint16_t grp_offset, wq_offset; /* how far into bar0 the regs are */
	uint16_t wq_size, total_wq_size;
	uint8_t lg2_max_batch, lg2_max_copy_size;
	unsigned int i, err_code;

	pci = malloc(sizeof(*pci));
	if (pci == NULL) {
		IOAT_PMD_ERR("%s: Can't allocate memory", __func__);
		goto err;
	}
	rte_spinlock_init(&pci->lk);

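	/*
	 * BAR0 holds the control registers; the OFFSETS register gives the
	 * locations of the group and work-queue config tables as multiples
	 * of 0x100 bytes from the start of that BAR. The descriptor
	 * submission portals are mapped from BAR2.
	 */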
	/* assign the bar registers, and then configure device */
	pci->regs = dev->mem_resource[0].addr;
	grp_offset = (uint16_t)pci->regs->offsets[0];
	pci->grp_regs = RTE_PTR_ADD(pci->regs, grp_offset * 0x100);
	wq_offset = (uint16_t)(pci->regs->offsets[0] >> 16);
	pci->wq_regs_base = RTE_PTR_ADD(pci->regs, wq_offset * 0x100);
	pci->portals = dev->mem_resource[2].addr;
	pci->wq_cfg_sz = (pci->regs->wqcap >> 24) & 0x0F;

	/* sanity check device status */
	if (pci->regs->gensts & GENSTS_DEV_STATE_MASK) {
		/* device is already enabled, or needs a function-level reset (FLR) */
		IOAT_PMD_ERR("Device status is not disabled, cannot init");
		goto err;
	}
	if (pci->regs->cmdstatus & CMDSTATUS_ACTIVE_MASK) {
		/* command in progress */
		IOAT_PMD_ERR("Device has a command in progress, cannot init");
		goto err;
	}

	/* read basic info about the hardware for use when configuring */
	nb_groups = (uint8_t)pci->regs->grpcap;
	nb_engines = (uint8_t)pci->regs->engcap;
	nb_wqs = (uint8_t)(pci->regs->wqcap >> 16);
	total_wq_size = (uint16_t)pci->regs->wqcap;
	lg2_max_copy_size = (uint8_t)(pci->regs->gencap >> 16) & 0x1F;
	lg2_max_batch = (uint8_t)(pci->regs->gencap >> 21) & 0x0F;

	IOAT_PMD_DEBUG("nb_groups = %u, nb_engines = %u, nb_wqs = %u",
			nb_groups, nb_engines, nb_wqs);

	/* zero out any old config */
	for (i = 0; i < nb_groups; i++) {
		pci->grp_regs[i].grpengcfg = 0;
		pci->grp_regs[i].grpwqcfg[0] = 0;
	}
	for (i = 0; i < nb_wqs; i++)
		idxd_get_wq_cfg(pci, i)[0] = 0;

	/* put each engine into a separate group to avoid reordering */
	if (nb_groups > nb_engines)
		nb_groups = nb_engines;
	if (nb_groups < nb_engines)
		nb_engines = nb_groups;

	/* assign engines to groups, round-robin style */
	for (i = 0; i < nb_engines; i++) {
		IOAT_PMD_DEBUG("Assigning engine %u to group %u",
				i, i % nb_groups);
		pci->grp_regs[i % nb_groups].grpengcfg |= (1ULL << i);
	}

	/* now do the same for queues and give work slots to each queue */
	wq_size = total_wq_size / nb_wqs;
	IOAT_PMD_DEBUG("Work queue size = %u, max batch = 2^%u, max copy = 2^%u",
			wq_size, lg2_max_batch, lg2_max_copy_size);
	for (i = 0; i < nb_wqs; i++) {
		/* add work queue "i" to a group */
		IOAT_PMD_DEBUG("Assigning work queue %u to group %u",
				i, i % nb_groups);
		pci->grp_regs[i % nb_groups].grpwqcfg[0] |= (1ULL << i);
		/* now configure it, in terms of size, max batch, mode */
		idxd_get_wq_cfg(pci, i)[WQ_SIZE_IDX] = wq_size;
		idxd_get_wq_cfg(pci, i)[WQ_MODE_IDX] = (1 << WQ_PRIORITY_SHIFT) |
				WQ_MODE_DEDICATED;
		idxd_get_wq_cfg(pci, i)[WQ_SIZES_IDX] = lg2_max_copy_size |
				(lg2_max_batch << WQ_BATCH_SZ_SHIFT);
	}

	/* dump the group configuration to output */
	for (i = 0; i < nb_groups; i++) {
		IOAT_PMD_DEBUG("## Group %d", i);
		IOAT_PMD_DEBUG("    GRPWQCFG: %"PRIx64, pci->grp_regs[i].grpwqcfg[0]);
		IOAT_PMD_DEBUG("    GRPENGCFG: %"PRIx64, pci->grp_regs[i].grpengcfg);
		IOAT_PMD_DEBUG("    GRPFLAGS: %"PRIx32, pci->grp_regs[i].grpflags);
	}

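	/*
	 * Record the per-queue depth as the maximum number of in-flight
	 * batches, since each submitted batch occupies a work-queue entry.
	 */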
	idxd->u.pci = pci;
	idxd->max_batches = wq_size;

	/* enable the device itself */
	err_code = idxd_pci_dev_command(idxd, idxd_enable_dev);
	if (err_code) {
		IOAT_PMD_ERR("Error enabling device: code %#x", err_code);
		return err_code;
	}
	IOAT_PMD_DEBUG("IDXD Device enabled OK");

	return nb_wqs;

err:
	free(pci);
	return -1;
}

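/*
 * PCI probe: initialise the hardware once, then register a separate rawdev
 * for each work queue, giving each queue its own slice of the portal BAR
 * for descriptor submission.
 */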
static int
idxd_rawdev_probe_pci(struct rte_pci_driver *drv, struct rte_pci_device *dev)
{
	struct idxd_rawdev idxd = {{0}}; /* Double {} to avoid error on BSD12 */
	uint8_t nb_wqs;
	int qid, ret = 0;
	char name[PCI_PRI_STR_SIZE];

	rte_pci_device_name(&dev->addr, name, sizeof(name));
	IOAT_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);
	dev->device.driver = &drv->driver;

	ret = init_pci_device(dev, &idxd);
	if (ret < 0) {
		IOAT_PMD_ERR("Error initializing PCI hardware");
		return ret;
	}
	nb_wqs = (uint8_t)ret;

	/* set up one device for each queue */
	for (qid = 0; qid < nb_wqs; qid++) {
		char qname[32];

		/* add the queue number to each device name */
		snprintf(qname, sizeof(qname), "%s-q%d", name, qid);
		idxd.qid = qid;
		idxd.public.portal = RTE_PTR_ADD(idxd.u.pci->portals,
				qid * IDXD_PORTAL_SIZE);
		if (idxd_is_wq_enabled(&idxd))
			IOAT_PMD_ERR("Error, WQ %u seems enabled", qid);
		ret = idxd_rawdev_create(qname, &dev->device,
				&idxd, &idxd_pci_ops);
		if (ret != 0) {
			IOAT_PMD_ERR("Failed to create rawdev %s", name);
			if (qid == 0) /* if no devices using this, free pci */
				free(idxd.u.pci);
			return ret;
		}
	}

	return 0;
}

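/*
 * Tear down a single rawdev: disable the device, free the rings and the
 * memzone backing the driver state, then hand the rawdev back to the
 * rawdev layer for release.
 */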
static int
idxd_rawdev_destroy(const char *name)
{
	int ret;
	uint8_t err_code;
	struct rte_rawdev *rdev;
	struct idxd_rawdev *idxd;

	if (!name) {
		IOAT_PMD_ERR("Invalid device name");
		return -EINVAL;
	}

	rdev = rte_rawdev_pmd_get_named_dev(name);
	if (!rdev) {
		IOAT_PMD_ERR("Invalid device name (%s)", name);
		return -EINVAL;
	}

	idxd = rdev->dev_private;
	if (!idxd) {
		IOAT_PMD_ERR("Error getting dev_private");
		return -EINVAL;
	}

	/* disable the device */
	err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
	if (err_code) {
		IOAT_PMD_ERR("Error disabling device: code %#x", err_code);
		return err_code;
	}
	IOAT_PMD_DEBUG("IDXD Device disabled OK");

	/* free device memory */
	IOAT_PMD_DEBUG("Freeing device driver memory");
	rdev->dev_private = NULL;
	rte_free(idxd->public.batch_ring);
	rte_free(idxd->public.hdl_ring);
	rte_memzone_free(idxd->mz);

	/* rte_rawdev_close is called by pmd_release */
	ret = rte_rawdev_pmd_release(rdev);
	if (ret)
		IOAT_PMD_DEBUG("Device cleanup failed");

	return 0;
}

static int
idxd_rawdev_remove_pci(struct rte_pci_device *dev)
{
	char name[PCI_PRI_STR_SIZE];
	int ret = 0;

	rte_pci_device_name(&dev->addr, name, sizeof(name));

	IOAT_PMD_INFO("Closing %s on NUMA node %d",
			name, dev->device.numa_node);

	ret = idxd_rawdev_destroy(name);

	return ret;
}

struct rte_pci_driver idxd_pmd_drv_pci = {
	.id_table = pci_id_idxd_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = idxd_rawdev_probe_pci,
	.remove = idxd_rawdev_remove_pci,
};

RTE_PMD_REGISTER_PCI(IDXD_PMD_RAWDEV_NAME_PCI, idxd_pmd_drv_pci);
RTE_PMD_REGISTER_PCI_TABLE(IDXD_PMD_RAWDEV_NAME_PCI, pci_id_idxd_map);
RTE_PMD_REGISTER_KMOD_DEP(IDXD_PMD_RAWDEV_NAME_PCI,
			  "* igb_uio | uio_pci_generic | vfio-pci");