/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 NXP
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>

#include "rte_rawdev.h"
#include "rte_rawdev_pmd.h"

/* dynamic log identifier */
int librawdev_logtype;

static struct rte_rawdev rte_rawdevices[RTE_RAWDEV_MAX_DEVS];

struct rte_rawdev *rte_rawdevs = rte_rawdevices;

static struct rte_rawdev_global rawdev_globals = {
	.nb_devs		= 0
};

/* Raw device, northbound API implementation */
uint8_t
rte_rawdev_count(void)
{
	return rawdev_globals.nb_devs;
}

uint16_t
rte_rawdev_get_dev_id(const char *name)
{
	uint16_t i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < rawdev_globals.nb_devs; i++)
		if ((strcmp(rte_rawdevices[i].name, name) == 0) &&
		    (rte_rawdevices[i].attached == RTE_RAWDEV_ATTACHED))
			return i;
	return -ENODEV;
}
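
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): looking up a rawdev by name before using the rest of the API.
 * The device name "rawdev_skeleton0" is a hypothetical example.
 *
 *	uint16_t dev_id = rte_rawdev_get_dev_id("rawdev_skeleton0");
 *
 *	// On failure the uint16_t return carries a wrapped -EINVAL or
 *	// -ENODEV, so treat any id outside the attached range as "not found".
 *	if (dev_id >= rte_rawdev_count())
 *		return -ENODEV;
 *	printf("rawdev %u lives on socket %d\n",
 *	       dev_id, rte_rawdev_socket_id(dev_id));
 */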

int
rte_rawdev_socket_id(uint16_t dev_id)
{
	struct rte_rawdev *dev;

	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_rawdevs[dev_id];

	return dev->socket_id;
}

int
rte_rawdev_info_get(uint16_t dev_id, struct rte_rawdev_info *dev_info)
{
	struct rte_rawdev *rawdev;

	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_FUNC_PTR_OR_ERR_RET(dev_info, -EINVAL);

	rawdev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*rawdev->dev_ops->dev_info_get, -ENOTSUP);
	(*rawdev->dev_ops->dev_info_get)(rawdev, dev_info->dev_private);

	/* dev_info was already validated above, so fill it unconditionally */
	dev_info->driver_name = rawdev->driver_name;
	dev_info->device = rawdev->device;

	return 0;
}
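
/*
 * Illustrative usage sketch (editor's addition): querying device info.
 * rte_rawdev_info_get() fills only the generic fields itself; the
 * dev_private pointer inside rte_rawdev_info is handed to the driver
 * untouched, so point it at a driver-defined structure first.
 * "struct my_drv_info" is a hypothetical driver-private type.
 *
 *	struct my_drv_info priv = { 0 };
 *	struct rte_rawdev_info info = { .dev_private = &priv };
 *
 *	if (rte_rawdev_info_get(dev_id, &info) == 0)
 *		printf("driver: %s\n", info.driver_name);
 */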

int
rte_rawdev_configure(uint16_t dev_id, struct rte_rawdev_info *dev_conf)
{
	struct rte_rawdev *dev;
	int diag;

	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_FUNC_PTR_OR_ERR_RET(dev_conf, -EINVAL);

	dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->started) {
		RTE_RDEV_ERR(
		   "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev, dev_conf->dev_private);
	if (diag != 0)
		RTE_RDEV_ERR("dev%d dev_configure = %d", dev_id, diag);
	else
		dev->attached = 1;

	return diag;
}
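
/*
 * Illustrative usage sketch (editor's addition): configuring a stopped
 * device. As with info_get, the generic rte_rawdev_info only wraps a
 * driver-private configuration blob; "struct my_drv_config" and its
 * num_queues field are hypothetical.
 *
 *	struct my_drv_config conf = { .num_queues = 2 };
 *	struct rte_rawdev_info cfg = { .dev_private = &conf };
 *	int ret = rte_rawdev_configure(dev_id, &cfg);
 *
 *	if (ret == -EBUSY)
 *		// device is started; call rte_rawdev_stop() first
 */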

int
rte_rawdev_queue_conf_get(uint16_t dev_id,
			  uint16_t queue_id,
			  rte_rawdev_obj_t queue_conf)
{
	struct rte_rawdev *dev;

	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
	return 0;
}

int
rte_rawdev_queue_setup(uint16_t dev_id,
		       uint16_t queue_id,
		       rte_rawdev_obj_t queue_conf)
{
	struct rte_rawdev *dev;

	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
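
/*
 * Illustrative usage sketch (editor's addition): fetching the default
 * queue configuration and then creating the queue. The configuration
 * object is opaque at this layer; "struct my_drv_qconf" and its depth
 * field are hypothetical driver-private names.
 *
 *	struct my_drv_qconf qconf;
 *
 *	rte_rawdev_queue_conf_get(dev_id, 0, &qconf);
 *	qconf.depth = 512;                     // hypothetical field
 *	if (rte_rawdev_queue_setup(dev_id, 0, &qconf) < 0)
 *		rte_exit(EXIT_FAILURE, "queue 0 setup failed\n");
 */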

int
rte_rawdev_queue_release(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_rawdev *dev;

	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
	return (*dev->dev_ops->queue_release)(dev, queue_id);
}

uint16_t
rte_rawdev_queue_count(uint16_t dev_id)
{
	struct rte_rawdev *dev;

	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_count, -ENOTSUP);
	return (*dev->dev_ops->queue_count)(dev);
}

int
rte_rawdev_get_attr(uint16_t dev_id,
		    const char *attr_name,
		    uint64_t *attr_value)
{
	struct rte_rawdev *dev;

	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->attr_get, -ENOTSUP);
	return (*dev->dev_ops->attr_get)(dev, attr_name, attr_value);
}

int
rte_rawdev_set_attr(uint16_t dev_id,
		    const char *attr_name,
		    const uint64_t attr_value)
{
	struct rte_rawdev *dev;

	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->attr_set, -ENOTSUP);
	return (*dev->dev_ops->attr_set)(dev, attr_name, attr_value);
}
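
/*
 * Illustrative usage sketch (editor's addition): attributes are simple
 * name/value pairs whose meaning is defined entirely by the driver.
 * The attribute name "firmware_loaded" is a hypothetical example.
 *
 *	uint64_t val = 0;
 *
 *	if (rte_rawdev_get_attr(dev_id, "firmware_loaded", &val) == 0 &&
 *	    val == 0)
 *		rte_rawdev_set_attr(dev_id, "firmware_loaded", 1);
 */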

int
rte_rawdev_enqueue_buffers(uint16_t dev_id,
			   struct rte_rawdev_buf **buffers,
			   unsigned int count,
			   rte_rawdev_obj_t context)
{
	struct rte_rawdev *dev;

	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->enqueue_bufs, -ENOTSUP);
	return (*dev->dev_ops->enqueue_bufs)(dev, buffers, count, context);
}

int
rte_rawdev_dequeue_buffers(uint16_t dev_id,
			   struct rte_rawdev_buf **buffers,
			   unsigned int count,
			   rte_rawdev_obj_t context)
{
	struct rte_rawdev *dev;

	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dequeue_bufs, -ENOTSUP);
	return (*dev->dev_ops->dequeue_bufs)(dev, buffers, count, context);
}
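
/*
 * Illustrative usage sketch (editor's addition): moving buffers through
 * the device. struct rte_rawdev_buf only carries an opaque buf_addr;
 * what it points at, and what the context argument means, are driver
 * specific. The payload pointer used here is hypothetical.
 *
 *	struct rte_rawdev_buf buf = { .buf_addr = payload };
 *	struct rte_rawdev_buf *bufs[1] = { &buf };
 *	int ret;
 *
 *	ret = rte_rawdev_enqueue_buffers(dev_id, bufs, 1, NULL);
 *	if (ret < 0)
 *		return ret;
 *	ret = rte_rawdev_dequeue_buffers(dev_id, bufs, 1, NULL);
 */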

int
rte_rawdev_dump(uint16_t dev_id, FILE *f)
{
	struct rte_rawdev *dev;

	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
	return (*dev->dev_ops->dump)(dev, f);
}

static int
xstats_get_count(uint16_t dev_id)
{
	struct rte_rawdev *dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get_names, -ENOTSUP);
	return (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
}

int
rte_rawdev_xstats_names_get(uint16_t dev_id,
		struct rte_rawdev_xstats_name *xstats_names,
		unsigned int size)
{
	const struct rte_rawdev *dev;
	int cnt_expected_entries;

	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);

	cnt_expected_entries = xstats_get_count(dev_id);

	if (xstats_names == NULL || cnt_expected_entries < 0 ||
	    (int)size < cnt_expected_entries || size <= 0)
		return cnt_expected_entries;

	dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get_names, -ENOTSUP);
	return (*dev->dev_ops->xstats_get_names)(dev, xstats_names, size);
}

/* retrieve rawdev extended statistics */
int
rte_rawdev_xstats_get(uint16_t dev_id,
		      const unsigned int ids[],
		      uint64_t values[],
		      unsigned int n)
{
	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const struct rte_rawdev *dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get, -ENOTSUP);
	return (*dev->dev_ops->xstats_get)(dev, ids, values, n);
}
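
/*
 * Illustrative usage sketch (editor's addition): the usual two-call
 * xstats pattern, first sizing the name table (NULL/0 returns the
 * count), then fetching ids and values. malloc() is used and error
 * checks are omitted purely for brevity.
 *
 *	int n = rte_rawdev_xstats_names_get(dev_id, NULL, 0);
 *	if (n <= 0)
 *		return n;
 *
 *	struct rte_rawdev_xstats_name *names = malloc(n * sizeof(*names));
 *	unsigned int *ids = malloc(n * sizeof(*ids));
 *	uint64_t *vals = malloc(n * sizeof(*vals));
 *
 *	rte_rawdev_xstats_names_get(dev_id, names, n);
 *	for (int i = 0; i < n; i++)
 *		ids[i] = i;
 *	rte_rawdev_xstats_get(dev_id, ids, vals, n);
 */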

uint64_t
rte_rawdev_xstats_by_name_get(uint16_t dev_id,
			      const char *name,
			      unsigned int *id)
{
	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
	const struct rte_rawdev *dev = &rte_rawdevs[dev_id];
	unsigned int temp = -1;

	if (id != NULL)
		*id = (unsigned int)-1;
	else
		id = &temp; /* driver never gets a NULL value */

	/* implemented by driver */
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get_by_name, -ENOTSUP);
	return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
}

int
rte_rawdev_xstats_reset(uint16_t dev_id,
			const uint32_t ids[], uint32_t nb_ids)
{
	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_rawdev *dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_reset, -ENOTSUP);
	return (*dev->dev_ops->xstats_reset)(dev, ids, nb_ids);
}

int
rte_rawdev_firmware_status_get(uint16_t dev_id, rte_rawdev_obj_t status_info)
{
	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_rawdev *dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->firmware_status_get, -ENOTSUP);
	return (*dev->dev_ops->firmware_status_get)(dev, status_info);
}

int
rte_rawdev_firmware_version_get(uint16_t dev_id, rte_rawdev_obj_t version_info)
{
	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_rawdev *dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->firmware_version_get, -ENOTSUP);
	return (*dev->dev_ops->firmware_version_get)(dev, version_info);
}

int
rte_rawdev_firmware_load(uint16_t dev_id, rte_rawdev_obj_t firmware_image)
{
	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_rawdev *dev = &rte_rawdevs[dev_id];

	if (!firmware_image)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->firmware_load, -ENOTSUP);
	return (*dev->dev_ops->firmware_load)(dev, firmware_image);
}

int
rte_rawdev_firmware_unload(uint16_t dev_id)
{
	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_rawdev *dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->firmware_unload, -ENOTSUP);
	return (*dev->dev_ops->firmware_unload)(dev);
}
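
/*
 * Illustrative usage sketch (editor's addition): firmware handling. The
 * image and status objects are opaque at this layer and entirely driver
 * defined; "fw_image" and "struct my_fw_status" are hypothetical.
 *
 *	struct my_fw_status status;
 *
 *	if (rte_rawdev_firmware_load(dev_id, fw_image) < 0)
 *		return -1;
 *	rte_rawdev_firmware_status_get(dev_id, &status);
 *	// ... use the device ...
 *	rte_rawdev_firmware_unload(dev_id);
 */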

int
rte_rawdev_selftest(uint16_t dev_id)
{
	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_rawdev *dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
	return (*dev->dev_ops->dev_selftest)();
}

int
rte_rawdev_start(uint16_t dev_id)
{
	struct rte_rawdev *dev;
	int diag;

	RTE_RDEV_DEBUG("Start dev_id=%" PRIu16, dev_id);

	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_rawdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->started != 0) {
		RTE_RDEV_ERR("Device with dev_id=%" PRIu16 " already started",
			     dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->started = 1;
	else
		return diag;

	return 0;
}

void
rte_rawdev_stop(uint16_t dev_id)
{
	struct rte_rawdev *dev;

	RTE_RDEV_DEBUG("Stop dev_id=%" PRIu16, dev_id);

	RTE_RAWDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->started == 0) {
		RTE_RDEV_ERR("Device with dev_id=%" PRIu16 " already stopped",
			dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	dev->started = 0;
}

int
rte_rawdev_close(uint16_t dev_id)
{
	struct rte_rawdev *dev;

	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	/* Device must be stopped before it can be closed */
	if (dev->started == 1) {
		RTE_RDEV_ERR("Device %u must be stopped before closing",
			     dev_id);
		return -EBUSY;
	}

	return (*dev->dev_ops->dev_close)(dev);
}
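
/*
 * Illustrative usage sketch (editor's addition): the overall device
 * life cycle as enforced above. configure() requires a stopped device,
 * close() refuses a started one, and stop() is a no-op (apart from the
 * log message) on an already stopped device.
 *
 *	rte_rawdev_configure(dev_id, &cfg);    // cfg as in the sketch above
 *	rte_rawdev_start(dev_id);
 *	// ... enqueue/dequeue traffic ...
 *	rte_rawdev_stop(dev_id);
 *	rte_rawdev_close(dev_id);
 */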

int
rte_rawdev_reset(uint16_t dev_id)
{
	struct rte_rawdev *dev;

	RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_rawdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
	/* Reset is not dependent on state of the device */
	return (*dev->dev_ops->dev_reset)(dev);
}

static inline uint16_t
rte_rawdev_find_free_device_index(void)
{
	uint16_t dev_id;

	for (dev_id = 0; dev_id < RTE_RAWDEV_MAX_DEVS; dev_id++) {
		if (rte_rawdevs[dev_id].attached ==
				RTE_RAWDEV_DETACHED)
			return dev_id;
	}

	return RTE_RAWDEV_MAX_DEVS;
}

struct rte_rawdev *
rte_rawdev_pmd_allocate(const char *name, size_t dev_priv_size, int socket_id)
{
	struct rte_rawdev *rawdev;
	uint16_t dev_id;

	if (rte_rawdev_pmd_get_named_dev(name) != NULL) {
		RTE_RDEV_ERR("Raw device with name %s already allocated!",
			     name);
		return NULL;
	}

	dev_id = rte_rawdev_find_free_device_index();
	if (dev_id == RTE_RAWDEV_MAX_DEVS) {
		RTE_RDEV_ERR("Reached maximum number of raw devices");
		return NULL;
	}

	rawdev = &rte_rawdevs[dev_id];

	rawdev->dev_private = rte_zmalloc_socket("rawdev private",
				     dev_priv_size,
				     RTE_CACHE_LINE_SIZE,
				     socket_id);
	if (!rawdev->dev_private) {
		RTE_RDEV_ERR("Unable to allocate memory for rawdev private data");
		return NULL;
	}

	rawdev->dev_id = dev_id;
	rawdev->socket_id = socket_id;
	rawdev->started = 0;
	snprintf(rawdev->name, RTE_RAWDEV_NAME_MAX_LEN, "%s", name);

	rawdev->attached = RTE_RAWDEV_ATTACHED;
	rawdev_globals.nb_devs++;

	return rawdev;
}
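
/*
 * Illustrative usage sketch (editor's addition): the driver-side
 * (southbound) counterpart. A PMD probe callback typically allocates
 * its rte_rawdev like this; "my_pmd_ops", "struct my_pmd_private" and
 * the probed "vdev" handle are hypothetical.
 *
 *	struct rte_rawdev *rawdev;
 *
 *	rawdev = rte_rawdev_pmd_allocate(name,
 *					 sizeof(struct my_pmd_private),
 *					 rte_socket_id());
 *	if (rawdev == NULL)
 *		return -ENOMEM;
 *	rawdev->dev_ops = &my_pmd_ops;
 *	rawdev->device = &vdev->device;   // hypothetical rte_vdev_device
 */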

int
rte_rawdev_pmd_release(struct rte_rawdev *rawdev)
{
	int ret;

	if (rawdev == NULL)
		return -EINVAL;

	ret = rte_rawdev_close(rawdev->dev_id);
	if (ret < 0)
		return ret;

	rawdev->attached = RTE_RAWDEV_DETACHED;
	rawdev_globals.nb_devs--;

	rawdev->dev_id = 0;
	rawdev->socket_id = 0;
	rawdev->dev_ops = NULL;
	if (rawdev->dev_private) {
		rte_free(rawdev->dev_private);
		rawdev->dev_private = NULL;
	}

	return 0;
}
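
/*
 * Illustrative usage sketch (editor's addition): the matching remove
 * path. rte_rawdev_pmd_release() closes the device (so it must already
 * be stopped), frees dev_private and detaches the slot.
 *
 *	struct rte_rawdev *rawdev = rte_rawdev_pmd_get_named_dev(name);
 *
 *	if (rawdev == NULL)
 *		return -EINVAL;
 *	return rte_rawdev_pmd_release(rawdev);
 */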

RTE_INIT(librawdev_init_log)
{
	librawdev_logtype = rte_log_register("lib.rawdev");
	if (librawdev_logtype >= 0)
		rte_log_set_level(librawdev_logtype, RTE_LOG_INFO);
}
551