/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <rte_cycles.h>
#include <rte_bus_pci.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_rawdev_pmd.h>

#include "rte_ioat_rawdev.h"
#include "ioat_spec.h"
#include "ioat_private.h"

static struct rte_pci_driver ioat_pmd_drv;

#define IOAT_VENDOR_ID		0x8086
#define IOAT_DEVICE_ID_SKX	0x2021
#define IOAT_DEVICE_ID_BDX0	0x6f20
#define IOAT_DEVICE_ID_BDX1	0x6f21
#define IOAT_DEVICE_ID_BDX2	0x6f22
#define IOAT_DEVICE_ID_BDX3	0x6f23
#define IOAT_DEVICE_ID_BDX4	0x6f24
#define IOAT_DEVICE_ID_BDX5	0x6f25
#define IOAT_DEVICE_ID_BDX6	0x6f26
#define IOAT_DEVICE_ID_BDX7	0x6f27
#define IOAT_DEVICE_ID_BDXE	0x6f2E
#define IOAT_DEVICE_ID_BDXF	0x6f2F
#define IOAT_DEVICE_ID_ICX	0x0b00

RTE_LOG_REGISTER_DEFAULT(ioat_pmd_logtype, INFO);

#define DESC_SZ sizeof(struct rte_ioat_generic_hw_desc)
#define COMPLETION_SZ sizeof(__m128i)

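/*
 * Configure the device: validate the requested ring size (power of two,
 * 64-4096 descriptors) and reserve a single IOVA-contiguous memzone that
 * holds the descriptor ring followed by the completion-handle array.
 */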
static int
ioat_dev_configure(const struct rte_rawdev *dev, rte_rawdev_obj_t config,
		size_t config_size)
{
	struct rte_ioat_rawdev_config *params = config;
	struct rte_ioat_rawdev *ioat = dev->dev_private;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	unsigned short i;

	if (dev->started)
		return -EBUSY;

	if (params == NULL || config_size != sizeof(*params))
		return -EINVAL;

	if (params->ring_size > 4096 || params->ring_size < 64 ||
			!rte_is_power_of_2(params->ring_size))
		return -EINVAL;

	ioat->ring_size = params->ring_size;
	ioat->hdls_disable = params->hdls_disable;
	if (ioat->desc_ring != NULL) {
		rte_memzone_free(ioat->desc_mz);
		ioat->desc_ring = NULL;
		ioat->desc_mz = NULL;
	}

	/* allocate one block of memory for both descriptors
	 * and completion handles.
	 */
	snprintf(mz_name, sizeof(mz_name), "rawdev%u_desc_ring", dev->dev_id);
	ioat->desc_mz = rte_memzone_reserve(mz_name,
			(DESC_SZ + COMPLETION_SZ) * ioat->ring_size,
			dev->device->numa_node, RTE_MEMZONE_IOVA_CONTIG);
	if (ioat->desc_mz == NULL)
		return -ENOMEM;
	ioat->desc_ring = ioat->desc_mz->addr;
	ioat->hdls = (void *)&ioat->desc_ring[ioat->ring_size];

	ioat->ring_addr = ioat->desc_mz->iova;

	/* configure descriptor ring - each one points to next */
	for (i = 0; i < ioat->ring_size; i++) {
		ioat->desc_ring[i].next = ioat->ring_addr +
				(((i + 1) % ioat->ring_size) * DESC_SZ);
	}

	return 0;
}

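/*
 * Start the device: point the hardware at the descriptor ring and the
 * completion write-back address, and prime the driver's cached status to
 * the last ring element so completion tracking starts from a known point.
 */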
static int
ioat_dev_start(struct rte_rawdev *dev)
{
	struct rte_ioat_rawdev *ioat = dev->dev_private;

	if (ioat->ring_size == 0 || ioat->desc_ring == NULL)
		return -EBUSY;

	/* inform hardware of where the descriptor ring is */
	ioat->regs->chainaddr = ioat->ring_addr;
	/* inform hardware of where to write the status/completions */
	ioat->regs->chancmp = ioat->status_addr;

	/* prime the status register to be set to the last element */
	ioat->status = ioat->ring_addr + ((ioat->ring_size - 1) * DESC_SZ);
	return 0;
}

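/* stop is a no-op for this device */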
static void
ioat_dev_stop(struct rte_rawdev *dev)
{
	RTE_SET_USED(dev);
}

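/* report the current configuration back through the same config struct */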
static int
ioat_dev_info_get(struct rte_rawdev *dev, rte_rawdev_obj_t dev_info,
		size_t dev_info_size)
{
	struct rte_ioat_rawdev_config *cfg = dev_info;
	struct rte_ioat_rawdev *ioat = dev->dev_private;

	if (dev_info == NULL || dev_info_size != sizeof(*cfg))
		return -EINVAL;

	cfg->ring_size = ioat->ring_size;
	cfg->hdls_disable = ioat->hdls_disable;
	return 0;
}

static int
ioat_dev_close(struct rte_rawdev *dev __rte_unused)
{
	return 0;
}

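/*
 * Create a rawdev for one IOAT channel: allocate the rawdev, reserve (or,
 * in a secondary process, look up) the named private-data memzone, map the
 * channel registers and reset the channel into a known state.
 */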
static int
ioat_rawdev_create(const char *name, struct rte_pci_device *dev)
{
	static const struct rte_rawdev_ops ioat_rawdev_ops = {
			.dev_configure = ioat_dev_configure,
			.dev_start = ioat_dev_start,
			.dev_stop = ioat_dev_stop,
			.dev_close = ioat_dev_close,
			.dev_info_get = ioat_dev_info_get,
			.xstats_get = ioat_xstats_get,
			.xstats_get_names = ioat_xstats_get_names,
			.xstats_reset = ioat_xstats_reset,
			.dev_selftest = ioat_rawdev_test,
	};

	struct rte_rawdev *rawdev = NULL;
	struct rte_ioat_rawdev *ioat = NULL;
	const struct rte_memzone *mz = NULL;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	int ret = 0;
	int retry = 0;

	if (!name) {
		IOAT_PMD_ERR("Invalid name of the device!");
		ret = -EINVAL;
		goto cleanup;
	}

	/* Allocate device structure */
	rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct rte_ioat_rawdev),
					 dev->device.numa_node);
	if (rawdev == NULL) {
		IOAT_PMD_ERR("Unable to allocate raw device");
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Allocate memory for the primary process or else return the memory
	 * of primary memzone for the secondary process.
	 */
	snprintf(mz_name, sizeof(mz_name), "rawdev%u_private", rawdev->dev_id);
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		mz = rte_memzone_lookup(mz_name);
		if (mz == NULL) {
			IOAT_PMD_ERR("Unable to look up memzone for private data\n");
			ret = -ENOMEM;
			goto cleanup;
		}
		rawdev->dev_private = mz->addr;
		rawdev->dev_ops = &ioat_rawdev_ops;
		rawdev->device = &dev->device;
		rawdev->driver_name = dev->device.driver->name;
		return 0;
	}
	mz = rte_memzone_reserve(mz_name, sizeof(struct rte_ioat_rawdev),
			dev->device.numa_node, RTE_MEMZONE_IOVA_CONTIG);
	if (mz == NULL) {
		IOAT_PMD_ERR("Unable to reserve memzone for private data\n");
		ret = -ENOMEM;
		goto cleanup;
	}

	rawdev->dev_private = mz->addr;
	rawdev->dev_ops = &ioat_rawdev_ops;
	rawdev->device = &dev->device;
	rawdev->driver_name = dev->device.driver->name;

	ioat = rawdev->dev_private;
	ioat->type = RTE_IOAT_DEV;
	ioat->rawdev = rawdev;
	ioat->mz = mz;
	ioat->regs = dev->mem_resource[0].addr;
	ioat->doorbell = &ioat->regs->dmacount;
	ioat->ring_size = 0;
	ioat->desc_ring = NULL;
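	/* the hardware writes its completion status straight into the
	 * "status" field of the private data, so record that field's IOVA
	 * for programming CHANCMP at start time.
	 */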
	ioat->status_addr = ioat->mz->iova +
			offsetof(struct rte_ioat_rawdev, status);

	/* do device initialization - reset and set error behaviour */
	if (ioat->regs->chancnt != 1)
		IOAT_PMD_ERR("%s: Channel count == %d\n", __func__,
				ioat->regs->chancnt);

	if (ioat->regs->chanctrl & 0x100) { /* locked by someone else */
		IOAT_PMD_WARN("%s: Channel appears locked\n", __func__);
		ioat->regs->chanctrl = 0;
	}

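	/* suspend then reset the channel, polling (1 ms per iteration, up to
	 * 200 retries) for the reset bit to clear before enabling error
	 * reporting.
	 */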
	ioat->regs->chancmd = RTE_IOAT_CHANCMD_SUSPEND;
	rte_delay_ms(1);
	ioat->regs->chancmd = RTE_IOAT_CHANCMD_RESET;
	rte_delay_ms(1);
	while (ioat->regs->chancmd & RTE_IOAT_CHANCMD_RESET) {
		ioat->regs->chainaddr = 0;
		rte_delay_ms(1);
		if (++retry >= 200) {
			IOAT_PMD_ERR("%s: cannot reset device. CHANCMD=0x%"PRIx8", CHANSTS=0x%"PRIx64", CHANERR=0x%"PRIx32"\n",
					__func__,
					ioat->regs->chancmd,
					ioat->regs->chansts,
					ioat->regs->chanerr);
			ret = -EIO;
			/* give up rather than spinning forever on a stuck reset */
			goto cleanup;
		}
	}
	ioat->regs->chanctrl = RTE_IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
			RTE_IOAT_CHANCTRL_ERR_COMPLETION_EN;

	return 0;

cleanup:
	if (rawdev)
		rte_rawdev_pmd_release(rawdev);

	return ret;
}

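/*
 * Tear down a rawdev: free the descriptor-ring and private-data memzones
 * and release the rawdev itself (which also closes it).
 */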
static int
ioat_rawdev_destroy(const char *name)
{
	int ret;
	struct rte_rawdev *rdev;

	if (!name) {
		IOAT_PMD_ERR("Invalid device name");
		return -EINVAL;
	}

	rdev = rte_rawdev_pmd_get_named_dev(name);
	if (!rdev) {
		IOAT_PMD_ERR("Invalid device name (%s)", name);
		return -EINVAL;
	}

	if (rdev->dev_private != NULL) {
		struct rte_ioat_rawdev *ioat = rdev->dev_private;
		rdev->dev_private = NULL;
		rte_memzone_free(ioat->desc_mz);
		rte_memzone_free(ioat->mz);
	}

	/* rte_rawdev_close is called by pmd_release */
	ret = rte_rawdev_pmd_release(rdev);
	if (ret)
		IOAT_PMD_DEBUG("Device cleanup failed");

	return 0;
}

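/* PCI probe: derive the rawdev name from the PCI address and create it */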
static int
ioat_rawdev_probe(struct rte_pci_driver *drv, struct rte_pci_device *dev)
{
	char name[32];
	int ret = 0;

	rte_pci_device_name(&dev->addr, name, sizeof(name));
	IOAT_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);

	dev->device.driver = &drv->driver;
	ret = ioat_rawdev_create(name, dev);
	return ret;
}

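/* PCI remove: destroy the rawdev that was created at probe time */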
static int
ioat_rawdev_remove(struct rte_pci_device *dev)
{
	char name[32];
	int ret;

	rte_pci_device_name(&dev->addr, name, sizeof(name));

	IOAT_PMD_INFO("Closing %s on NUMA node %d",
			name, dev->device.numa_node);

	ret = ioat_rawdev_destroy(name);
	return ret;
}

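/* PCI IDs of the DMA engine devices handled by this driver */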
static const struct rte_pci_id pci_id_ioat_map[] = {
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_SKX) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX0) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX1) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX2) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX3) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX4) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX5) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX6) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX7) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXE) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXF) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_ICX) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver ioat_pmd_drv = {
	.id_table = pci_id_ioat_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = ioat_rawdev_probe,
	.remove = ioat_rawdev_remove,
};

RTE_PMD_REGISTER_PCI(IOAT_PMD_RAWDEV_NAME, ioat_pmd_drv);
RTE_PMD_REGISTER_PCI_TABLE(IOAT_PMD_RAWDEV_NAME, pci_id_ioat_map);
RTE_PMD_REGISTER_KMOD_DEP(IOAT_PMD_RAWDEV_NAME, "* igb_uio | uio_pci_generic");