/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "rte_cryptodev_pmd.h"

struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = &rte_crypto_devices[0];

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= &rte_crypto_devices[0],
		.data			= { NULL },
		.nb_devs		= 0,
		.max_devs		= RTE_CRYPTO_MAX_DEVS
};

struct rte_cryptodev_global *rte_cryptodev_globals = &cryptodev_globals;

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, a pointer to the parameters for the callback, and the
 * event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG		("max_nb_queue_pairs")
#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG		("max_nb_sessions")
#define RTE_CRYPTODEV_VDEV_SOCKET_ID			("socket_id")

static const char *cryptodev_vdev_valid_params[] = {
	RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
	RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
	RTE_CRYPTODEV_VDEV_SOCKET_ID
};

static uint8_t
number_of_sockets(void)
{
	int sockets = 0;
	int i;
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();

	for (i = 0; ((i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL)); i++) {
		if (sockets < ms[i].socket_id)
			sockets = ms[i].socket_id;
	}

	/* Number of sockets = maximum socket_id + 1 */
	return ++sockets;
}
/** Parse an integer from an argument string */
static int
parse_integer_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	int *i = (int *) extra_args;

	*i = atoi(value);
	if (*i < 0) {
		CDEV_LOG_ERR("Argument has to be non-negative.");
		return -1;
	}

	return 0;
}

int
rte_cryptodev_parse_vdev_init_params(struct rte_crypto_vdev_init_params *params,
		const char *input_args)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;

	if (params == NULL)
		return -EINVAL;

	if (input_args) {
		kvlist = rte_kvargs_parse(input_args,
				cryptodev_vdev_valid_params);
		if (kvlist == NULL)
			return -1;

		ret = rte_kvargs_process(kvlist,
					RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
					&parse_integer_arg,
					&params->max_nb_queue_pairs);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
					RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
					&parse_integer_arg,
					&params->max_nb_sessions);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SOCKET_ID,
					&parse_integer_arg,
					&params->socket_id);
		if (ret < 0)
			goto free_kvlist;

		if (params->socket_id >= number_of_sockets()) {
			CDEV_LOG_ERR("Invalid socket id specified to create "
				"the virtual crypto device on");
			ret = -EINVAL;
			goto free_kvlist;
		}
	}

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}
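
/*
 * Usage sketch (illustrative only, not called from this file): a virtual
 * crypto PMD would typically fill in default parameters and then overlay
 * whatever devargs the user supplied. The argument string below is an
 * example value, not one defined by this library.
 *
 *	struct rte_crypto_vdev_init_params init_params = {
 *		.max_nb_queue_pairs = 8,
 *		.max_nb_sessions = 2048,
 *		.socket_id = rte_socket_id()
 *	};
 *
 *	if (rte_cryptodev_parse_vdev_init_params(&init_params,
 *			"max_nb_queue_pairs=4,socket_id=0") < 0)
 *		return -EINVAL;
 */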

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";

	default:
		return NULL;
	}
}
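
/*
 * Usage sketch (illustrative only): printing the feature names a device
 * advertises, one flag bit at a time.
 *
 *	struct rte_cryptodev_info info;
 *	uint64_t flag;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	for (flag = 1; flag != 0; flag <<= 1)
 *		if (info.feature_flags & flag)
 *			printf("%s\n", rte_cryptodev_get_feature_name(flag));
 */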


int
rte_cryptodev_create_vdev(const char *name, const char *args)
{
	return rte_eal_vdev_init(name, args);
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned i;

	if (name == NULL)
		return -1;

	for (i = 0; i < rte_cryptodev_globals->max_devs; i++)
		if ((strcmp(rte_cryptodev_globals->devs[i].data->name, name)
				== 0) &&
				(rte_cryptodev_globals->devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return rte_cryptodev_globals->nb_devs;
}

uint8_t
rte_cryptodev_count_devtype(enum rte_cryptodev_type type)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < rte_cryptodev_globals->max_devs; i++)
		if (rte_cryptodev_globals->devs[i].dev_type == type &&
			rte_cryptodev_globals->devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}
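
/*
 * Usage sketch (illustrative only): locating a device by name or
 * enumerating attached devices of one type. The device name and the
 * RTE_CRYPTODEV_QAT_SYM_PMD type are example choices.
 *
 *	int dev_id = rte_cryptodev_get_dev_id("2:0.0");
 *	uint8_t nb_qat = rte_cryptodev_count_devtype(RTE_CRYPTODEV_QAT_SYM_PMD);
 *
 *	if (dev_id < 0 || rte_cryptodev_count() == 0)
 *		return -ENODEV;
 */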

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}

static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, enum pmd_type type, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data *cryptodev_data =
				cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, &cryptodev_data,
				socket_id);

		if (retval < 0 || cryptodev_data == NULL)
			return NULL;

		cryptodev->data = cryptodev_data;

		snprintf(cryptodev->data->name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s", name);

		cryptodev->data->dev_id = dev_id;
		cryptodev->data->socket_id = socket_id;
		cryptodev->data->dev_started = 0;

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;
		cryptodev->pmd_type = type;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

static inline int
rte_cryptodev_create_unique_device_name(char *name, size_t size,
		struct rte_pci_device *pci_dev)
{
	int ret;

	if ((name == NULL) || (pci_dev == NULL))
		return -EINVAL;

	ret = snprintf(name, size, "%d:%d.%d",
			pci_dev->addr.bus, pci_dev->addr.devid,
			pci_dev->addr.function);
	if (ret < 0)
		return ret;
	return 0;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;

	if (cryptodev == NULL)
		return -EINVAL;

	ret = rte_cryptodev_close(cryptodev->data->dev_id);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

struct rte_cryptodev *
rte_cryptodev_pmd_virtual_dev_init(const char *name, size_t dev_private_size,
		int socket_id)
{
	struct rte_cryptodev *cryptodev;

	/* allocate device structure */
	cryptodev = rte_cryptodev_pmd_allocate(name, PMD_VDEV, socket_id);
	if (cryptodev == NULL)
		return NULL;

	/* allocate private device structure */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private =
				rte_zmalloc_socket("cryptodev device private",
						dev_private_size,
						RTE_CACHE_LINE_SIZE,
						socket_id);

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private device"
					" data");
	}

	/* initialise user call-back tail queue */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	return cryptodev;
}
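
/*
 * Usage sketch (illustrative only): skeleton of a virtual PMD init routine
 * built on the helper above. The names crypto_null_example_init,
 * struct null_crypto_private and null_crypto_example_ops are assumptions
 * made up for the example.
 *
 *	static int
 *	crypto_null_example_init(const char *name, const char *args __rte_unused)
 *	{
 *		struct rte_cryptodev *dev;
 *
 *		dev = rte_cryptodev_pmd_virtual_dev_init(name,
 *				sizeof(struct null_crypto_private),
 *				rte_socket_id());
 *		if (dev == NULL)
 *			return -EFAULT;
 *
 *		dev->dev_type = RTE_CRYPTODEV_NULL_PMD;
 *		dev->dev_ops = &null_crypto_example_ops;
 *		return 0;
 *	}
 */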

static int
rte_cryptodev_init(struct rte_pci_driver *pci_drv,
		struct rte_pci_device *pci_dev)
{
	struct rte_cryptodev_driver *cryptodrv;
	struct rte_cryptodev *cryptodev;

	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	cryptodrv = (struct rte_cryptodev_driver *)pci_drv;
	if (cryptodrv == NULL)
		return -ENODEV;

	/* Create unique Crypto device name using PCI address */
	rte_cryptodev_create_unique_device_name(cryptodev_name,
			sizeof(cryptodev_name), pci_dev);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, PMD_PDEV,
			rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private =
				rte_zmalloc_socket(
						"cryptodev private structure",
						cryptodrv->dev_private_size,
						RTE_CACHE_LINE_SIZE,
						rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	cryptodev->pci_dev = pci_dev;
	cryptodev->driver = cryptodrv;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* Invoke PMD device initialization function */
	retval = (*cryptodrv->cryptodev_init)(cryptodrv, cryptodev);
	if (retval == 0)
		return 0;

	CDEV_LOG_ERR("driver %s: crypto_dev_init(vendor_id=0x%x device_id=0x%x)"
			" failed", pci_drv->name,
			(unsigned) pci_dev->id.vendor_id,
			(unsigned) pci_dev->id.device_id);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;

	return -ENXIO;
}

static int
rte_cryptodev_uninit(struct rte_pci_device *pci_dev)
{
	const struct rte_cryptodev_driver *cryptodrv;
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int ret;

	if (pci_dev == NULL)
		return -EINVAL;

	/* Create unique device name using PCI address */
	rte_cryptodev_create_unique_device_name(cryptodev_name,
			sizeof(cryptodev_name), pci_dev);

	cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
	if (cryptodev == NULL)
		return -ENODEV;

	cryptodrv = (const struct rte_cryptodev_driver *)pci_dev->driver;
	if (cryptodrv == NULL)
		return -ENODEV;

	/* Invoke PMD device uninit function */
	if (*cryptodrv->cryptodev_uninit) {
		ret = (*cryptodrv->cryptodev_uninit)(cryptodrv, cryptodev);
		if (ret)
			return ret;
	}

	/* free crypto device */
	rte_cryptodev_pmd_release_device(cryptodev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->pci_dev = NULL;
	cryptodev->driver = NULL;
	cryptodev->data = NULL;

	return 0;
}

int
rte_cryptodev_pmd_driver_register(struct rte_cryptodev_driver *cryptodrv,
		enum pmd_type type)
{
	/* Call crypto device initialization directly if device is virtual */
	if (type == PMD_VDEV)
		return rte_cryptodev_init((struct rte_pci_driver *)cryptodrv,
				NULL);

	/*
	 * Register PCI driver for physical device initialisation during
	 * PCI probing
	 */
	cryptodrv->pci_drv.devinit = rte_cryptodev_init;
	cryptodrv->pci_drv.devuninit = rte_cryptodev_uninit;

	rte_eal_pci_register(&cryptodrv->pci_drv);

	return 0;
}
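
/*
 * Usage sketch (illustrative only): how a physical crypto PMD might register
 * itself with this function. The driver name, id table, flags and callbacks
 * are assumptions made up for the example.
 *
 *	static struct rte_cryptodev_driver example_pci_drv = {
 *		.pci_drv = {
 *			.name = "crypto_example_pmd",
 *			.id_table = example_pci_id_map,
 *		},
 *		.cryptodev_init = example_dev_init,
 *		.dev_private_size = sizeof(struct example_private),
 *	};
 *
 *	rte_cryptodev_pmd_driver_register(&example_pci_drv, PMD_PDEV);
 */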


uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			CDEV_LOG_ERR("failed to realloc qp meta data,"
						" nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}

		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_queue_pair_start(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_start, -ENOTSUP);

	return dev->dev_ops->queue_pair_start(dev, queue_pair_id);
}

int
rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_stop, -ENOTSUP);

	return dev->dev_ops->queue_pair_stop(dev, queue_pair_id);
}

static int
rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
		unsigned nb_objs, unsigned obj_cache_size, int socket_id);

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Setup Session mempool for device */
	return rte_cryptodev_sym_session_pool_create(dev,
			config->session_mp.nb_objs,
			config->session_mp.cache_size,
			config->socket_id);
}


int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
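
/*
 * Usage sketch (illustrative only): the typical bring-up sequence an
 * application follows with the functions above - configure the device,
 * set up each queue pair, then start the device. The numeric values are
 * example choices.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_cryptodev_socket_id(dev_id),
 *		.nb_queue_pairs = 2,
 *		.session_mp = { .nb_objs = 2048, .cache_size = 64 }
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 4096 };
 *	uint16_t qp_id;
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0)
 *		return -1;
 *	for (qp_id = 0; qp_id < conf.nb_queue_pairs; qp_id++)
 *		if (rte_cryptodev_queue_pair_setup(dev_id, qp_id, &qp_conf,
 *				conf.socket_id) < 0)
 *			return -1;
 *	if (rte_cryptodev_start(dev_id) < 0)
 *		return -1;
 */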


int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}
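
/*
 * Usage sketch (illustrative only): periodically sampling and clearing the
 * device statistics.
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *		printf("enqueued %" PRIu64 ", dequeued %" PRIu64 "\n",
 *				stats.enqueued_count, stats.dequeued_count);
 *	rte_cryptodev_stats_reset(dev_id);
 */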


void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (dev_id >= cryptodev_globals.nb_devs) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->pci_dev = dev->pci_dev;
	if (dev->driver)
		dev_info->driver_name = dev->driver->pci_drv.name;
}


int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
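
/*
 * Usage sketch (illustrative only): registering an application callback for
 * error events on a device. The callback body is an assumption made up for
 * the example.
 *
 *	static void
 *	example_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg __rte_unused)
 *	{
 *		if (event == RTE_CRYPTODEV_EVENT_ERROR)
 *			printf("crypto device %u reported an error\n", dev_id);
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			example_event_cb, NULL);
 */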

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}


static void
rte_cryptodev_sym_session_init(struct rte_mempool *mp,
		void *opaque_arg,
		void *_sess,
		__rte_unused unsigned i)
{
	struct rte_cryptodev_sym_session *sess = _sess;
	struct rte_cryptodev *dev = opaque_arg;

	memset(sess, 0, mp->elt_size);

	sess->dev_id = dev->data->dev_id;
	sess->dev_type = dev->dev_type;
	sess->mp = mp;

	if (dev->dev_ops->session_initialize)
		(*dev->dev_ops->session_initialize)(mp, sess);
}

static int
rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
		unsigned nb_objs, unsigned obj_cache_size, int socket_id)
{
	char mp_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	unsigned priv_sess_size;

	unsigned n = snprintf(mp_name, sizeof(mp_name), "cdev_%d_sess_mp",
			dev->data->dev_id);
	if (n >= sizeof(mp_name)) {
		CDEV_LOG_ERR("Unable to create unique name for session mempool");
		return -ENOMEM;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_get_size, -ENOTSUP);
	priv_sess_size = (*dev->dev_ops->session_get_size)(dev);
	if (priv_sess_size == 0) {
		CDEV_LOG_ERR("%s returned an invalid private session size",
						dev->data->name);
		return -ENOMEM;
	}

	unsigned elt_size = sizeof(struct rte_cryptodev_sym_session) +
			priv_sess_size;

	dev->data->session_pool = rte_mempool_lookup(mp_name);
	if (dev->data->session_pool != NULL) {
		if ((dev->data->session_pool->elt_size != elt_size) ||
				(dev->data->session_pool->cache_size <
				obj_cache_size) ||
				(dev->data->session_pool->size < nb_objs)) {

			CDEV_LOG_ERR("%s mempool already exists with different"
					" initialization parameters", mp_name);
			dev->data->session_pool = NULL;
			return -ENOMEM;
		}
	} else {
		dev->data->session_pool = rte_mempool_create(
				mp_name, /* mempool name */
				nb_objs, /* number of elements */
				elt_size, /* element size */
				obj_cache_size, /* cache size */
				0, /* private data size */
				NULL, /* pool initialization constructor */
				NULL, /* pool initialization constructor arg */
				rte_cryptodev_sym_session_init,
				/* obj constructor */
				dev, /* obj constructor arg */
				socket_id, /* socket id */
				0); /* flags */

		if (dev->data->session_pool == NULL) {
			CDEV_LOG_ERR("%s mempool allocation failed", mp_name);
			return -ENOMEM;
		}
	}

	CDEV_LOG_DEBUG("%s mempool created!", mp_name);
	return 0;
}

struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(uint8_t dev_id,
		struct rte_crypto_sym_xform *xform)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_sym_session *sess;
	void *_sess;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(dev->data->session_pool, &_sess)) {
		CDEV_LOG_ERR("Couldn't get object from session mempool");
		return NULL;
	}

	sess = (struct rte_cryptodev_sym_session *)_sess;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_configure, NULL);
	if (dev->dev_ops->session_configure(dev, xform, sess->_private) ==
			NULL) {
		CDEV_LOG_ERR("dev_id %d failed to configure session details",
				dev_id);

		/* Return session to mempool */
		rte_mempool_put(sess->mp, _sess);
		return NULL;
	}

	return sess;
}
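
/*
 * Usage sketch (illustrative only): creating a symmetric session for a
 * single cipher transform. The algorithm, key buffer and key length are
 * example values.
 *
 *	struct rte_crypto_sym_xform cipher_xform = {
 *		.next = NULL,
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = cipher_key, .length = 16 }
 *		}
 *	};
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
 *
 *	if (sess == NULL)
 *		return -1;
 */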

struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_free(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return sess;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Check the session belongs to this device type */
	if (sess->dev_type != dev->dev_type)
		return sess;

	/* Let device implementation clear session material */
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_clear, sess);
	dev->dev_ops->session_clear(dev, (void *)sess->_private);

	/* Return session to mempool */
	rte_mempool_put(sess->mp, (void *)sess);

	return NULL;
}

/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2phy(_op_data);
	op->mempool = mempool;
}


struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
		int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned elt_size = sizeof(struct rte_crypto_op) +
			sizeof(struct rte_crypto_sym_op) +
			priv_size;

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			mp = NULL;
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;
	priv->type = type;

	return mp;
}
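
/*
 * Usage sketch (illustrative only): creating an operation pool at start-up
 * and drawing symmetric operations from it on the data path. The pool name
 * and sizes are example values.
 *
 *	struct rte_mempool *op_pool = rte_crypto_op_pool_create(
 *			"crypto_op_pool_example",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			8192, 128, 0, rte_socket_id());
 *
 *	struct rte_crypto_op *op = rte_crypto_op_alloc(op_pool,
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 *
 *	if (op == NULL)
 *		return -ENOMEM;
 */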