/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "rte_cryptodev_pmd.h"

static uint8_t nb_drivers;

struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = &rte_crypto_devices[0];

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= &rte_crypto_devices[0],
		.data			= { NULL },
		.nb_devs		= 0,
		.max_devs		= RTE_CRYPTO_MAX_DEVS
};

struct rte_cryptodev_global *rte_cryptodev_globals = &cryptodev_globals;

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;


/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, the pointer to the parameters for the callback, and the
 * event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * The crypto cipher algorithm string identifiers.
 * They can be used in the application command line.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};

/**
 * The crypto cipher operation string identifiers.
 * They can be used in the application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * The crypto auth algorithm string identifiers.
 * They can be used in the application command line.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};

/**
 * The crypto AEAD algorithm string identifiers.
 * They can be used in the application command line.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
};

/**
 * The crypto AEAD operation string identifiers.
 * They can be used in the application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
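
/*
 * Usage sketch (illustrative only, not part of the library): the lookup
 * helpers above let an application map a command-line token onto the
 * corresponding enum value; the "aes-cbc" token below is just an example.
 *
 *	enum rte_crypto_cipher_algorithm cipher_algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&cipher_algo, "aes-cbc") < 0)
 *		rte_exit(EXIT_FAILURE, "Unsupported cipher algorithm\n");
 */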

/**
 * The crypto auth operation string identifiers.
 * They can be used in the application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}

#define param_range_check(x, y) \
	((((x) < (y).min) || ((x) > (y).max)) || \
	(((y).increment != 0) && (((x) % (y).increment) != 0)))

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, capability->cipher.key_size))
		return -1;

	if (param_range_check(iv_size, capability->cipher.iv_size))
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, capability->auth.key_size))
		return -1;

	if (param_range_check(digest_size, capability->auth.digest_size))
		return -1;

	if (param_range_check(iv_size, capability->auth.iv_size))
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, capability->aead.key_size))
		return -1;

	if (param_range_check(digest_size, capability->aead.digest_size))
		return -1;

	if (param_range_check(aad_size, capability->aead.aad_size))
		return -1;

	if (param_range_check(iv_size, capability->aead.iv_size))
		return -1;

	return 0;
}
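
/*
 * Usage sketch (illustrative; dev_id is assumed to be a valid device):
 * query the symmetric capabilities for AES-CBC and validate a 16-byte
 * key and 16-byte IV before building a cipher transform.
 *
 *	struct rte_cryptodev_sym_capability_idx cap_idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
 *	if (cap == NULL ||
 *			rte_cryptodev_sym_capability_check_cipher(cap, 16, 16))
 *		return -ENOTSUP;
 */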

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER:
		return "MBUF_SCATTER_GATHER";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	default:
		return NULL;
	}
}
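
/*
 * Usage sketch (illustrative): each feature flag is a single bit, so an
 * application can walk all 64 bit positions of
 * rte_cryptodev_info.feature_flags and print the recognised ones.
 *
 *	uint64_t flags = dev_info.feature_flags;
 *	unsigned int bit;
 *
 *	for (bit = 0; bit < 64; bit++) {
 *		const char *fname =
 *			rte_cryptodev_get_feature_name(1ULL << bit);
 *		if ((flags & (1ULL << bit)) && fname != NULL)
 *			printf("%s\n", fname);
 *	}
 */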

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &rte_cryptodev_globals->devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < rte_cryptodev_globals->max_devs; i++) {
		dev = &rte_cryptodev_globals->devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

unsigned int
rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (dev_id >= rte_cryptodev_globals->nb_devs)
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}


int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < rte_cryptodev_globals->nb_devs; i++)
		if ((strcmp(rte_cryptodev_globals->devs[i].data->name, name)
				== 0) &&
				(rte_cryptodev_globals->devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return rte_cryptodev_globals->nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < rte_cryptodev_globals->max_devs; i++)
		if (rte_cryptodev_globals->devs[i].driver_id == driver_id &&
			rte_cryptodev_globals->devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = rte_cryptodev_globals->devs;
	uint8_t max_devs = rte_cryptodev_globals->max_devs;

	for (i = 0; i < max_devs && count < nb_devices; i++) {

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name));

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}
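
/*
 * Usage sketch (illustrative; the driver name is an example): collect
 * the identifiers of all attached devices bound to a given driver and
 * print their names.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t n, i;
 *
 *	n = rte_cryptodev_devices_get("crypto_aesni_mb", ids, RTE_DIM(ids));
 *	for (i = 0; i < n; i++)
 *		printf("dev %u: %s\n", ids[i],
 *				rte_cryptodev_name_get(ids[i]));
 */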

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY)
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}

static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data *cryptodev_data =
				cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, &cryptodev_data,
				socket_id);

		if (retval < 0 || cryptodev_data == NULL)
			return NULL;

		cryptodev->data = cryptodev_data;

		snprintf(cryptodev->data->name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s", name);

		cryptodev->data->dev_id = dev_id;
		cryptodev->data->socket_id = socket_id;
		cryptodev->data->dev_started = 0;

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;

	if (cryptodev == NULL)
		return -EINVAL;

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(cryptodev->data->dev_id);
		if (ret < 0)
			return ret;
	}

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %u queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -ENOMEM;
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			CDEV_LOG_ERR("failed to realloc qp meta data,"
						" nb_queues %u", nb_qpairs);
			return -ENOMEM;
		}

		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_queue_pair_start(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_start, -ENOTSUP);

	return dev->dev_ops->queue_pair_start(dev, queue_pair_id);
}

int
rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_stop, -ENOTSUP);

	return dev->dev_ops->queue_pair_stop(dev, queue_pair_id);
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	return (*dev->dev_ops->dev_configure)(dev, config);
}


int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
		struct rte_mempool *session_pool)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id, session_pool);
}
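
/*
 * Usage sketch (assumptions: dev_id is valid, "session_pool" was created
 * beforehand and the descriptor count suits the PMD): the expected
 * bring-up order is configure, then per-queue-pair setup, then start.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *			rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *				rte_socket_id(), session_pool) < 0 ||
 *			rte_cryptodev_start(dev_id) < 0)
 *		return -1;
 */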

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}


void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (dev_id >= cryptodev_globals.nb_devs) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
}


int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback if no matching one was found */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
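
/*
 * Usage sketch (the handler name is hypothetical): register a handler
 * that rte_cryptodev_pmd_callback_process() will invoke when the PMD
 * raises an error event.
 *
 *	static void
 *	crypto_event_handler(uint8_t dev_id,
 *			enum rte_cryptodev_event_type event, void *cb_arg)
 *	{
 *		printf("event %d on dev %u\n", event, dev_id);
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			crypto_event_handler, NULL);
 */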

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}


int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint8_t index;
	int ret;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	index = dev->driver_id;

	if (sess->sess_private_data[index] == NULL) {
		ret = dev->dev_ops->session_configure(dev, xforms, sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	return 0;
}

struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	/* Clear device session pointer */
	memset(sess, 0, (sizeof(void *) * nb_drivers));

	return sess;
}
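
/*
 * Usage sketch (assumptions: "sess_mp" holds objects large enough for
 * the session header plus each device's private data, and "xform" is a
 * populated transform chain): a session is allocated from the mempool,
 * initialised per device, and torn down in the reverse order.
 *
 *	struct rte_cryptodev_sym_session *sess;
 *
 *	sess = rte_cryptodev_sym_session_create(sess_mp);
 *	if (sess == NULL || rte_cryptodev_sym_session_init(dev_id, sess,
 *			&xform, sess_mp) < 0)
 *		return -1;
 *
 *	... enqueue/dequeue operations using sess ...
 *
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	rte_cryptodev_sym_session_free(sess);
 */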

int
rte_cryptodev_queue_pair_attach_sym_session(uint8_t dev_id, uint16_t qp_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	/*
	 * The API is optional, so do not return an error if the driver
	 * does not support it.
	 */
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_attach_session, 0);

	void *sess_priv = get_session_private_data(sess, dev->driver_id);

	if (dev->dev_ops->qp_attach_session(dev, qp_id, sess_priv)) {
		CDEV_LOG_ERR("dev_id %d failed to attach qp: %d with session",
				dev_id, qp_id);
		return -EPERM;
	}

	return 0;
}

int
rte_cryptodev_queue_pair_detach_sym_session(uint8_t dev_id, uint16_t qp_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	/*
	 * The API is optional, so do not return an error if the driver
	 * does not support it.
	 */
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_detach_session, 0);

	void *sess_priv = get_session_private_data(sess, dev->driver_id);

	if (dev->dev_ops->qp_detach_session(dev, qp_id, sess_priv)) {
		CDEV_LOG_ERR("dev_id %d failed to detach qp: %d from session",
				dev_id, qp_id);
		return -EPERM;
	}

	return 0;
}

int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	dev->dev_ops->session_clear(dev, sess);

	return 0;
}

int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
{
	uint8_t i;
	void *sess_priv;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < nb_drivers; i++) {
		sess_priv = get_session_private_data(sess, i);
		if (sess_priv != NULL)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	return 0;
}

unsigned int
rte_cryptodev_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data
	 * of all registered drivers
	 */
	return (sizeof(void *) * nb_drivers);
}

unsigned int
rte_cryptodev_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int header_size = sizeof(void *) * nb_drivers;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->session_get_size)(dev);

	/*
	 * If size is less than session header size,
	 * return the latter, as this guarantees that
	 * sessionless operations will work
	 */
	if (priv_sess_size < header_size)
		return header_size;

	return priv_sess_size;
}

/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned int i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}


struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned int nb_elts, unsigned int cache_size,
		uint16_t priv_size, int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned int elt_size = sizeof(struct rte_crypto_op) +
			sizeof(struct rte_crypto_sym_op) +
			priv_size;

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			mp = NULL;
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;
	priv->type = type;

	return mp;
}
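
/*
 * Usage sketch (pool name and sizes are illustrative): create a pool of
 * symmetric crypto operations and draw one from it; the op is returned
 * to the pool with rte_crypto_op_free().
 *
 *	struct rte_mempool *op_pool;
 *	struct rte_crypto_op *op;
 *
 *	op_pool = rte_crypto_op_pool_create("crypto_op_pool",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 0,
 *			rte_socket_id());
 *	if (op_pool == NULL)
 *		return -1;
 *
 *	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 */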

int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	uint32_t i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}

TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);

int
rte_cryptodev_driver_id_get(const char *name)
{
	struct cryptodev_driver *driver;
	const char *driver_name;

	if (name == NULL) {
		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
		return -1;
	}

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		driver_name = driver->driver->name;
		if (strncmp(driver_name, name, strlen(driver_name)) == 0)
			return driver->id;
	}
	return -1;
}

const char *
rte_cryptodev_name_get(uint8_t dev_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL)
		return NULL;

	return dev->data->name;
}

const char *
rte_cryptodev_driver_name_get(uint8_t driver_id)
{
	struct cryptodev_driver *driver;

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
		if (driver->id == driver_id)
			return driver->driver->name;
	return NULL;
}

uint8_t
rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
		const struct rte_driver *drv)
{
	crypto_drv->driver = drv;
	crypto_drv->id = nb_drivers;

	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);

	return nb_drivers++;
}