/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "rte_cryptodev_pmd.h"

static uint8_t nb_drivers;

struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = &rte_crypto_devices[0];

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= &rte_crypto_devices[0],
		.data			= { NULL },
		.nb_devs		= 0,
		.max_devs		= RTE_CRYPTO_MAX_DEVS
};

struct rte_cryptodev_global *rte_cryptodev_globals = &cryptodev_globals;

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the callback's parameters, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * The crypto cipher algorithm string identifiers.
 * They can be used in an application command line.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};

/**
 * The crypto cipher operation string identifiers.
 * They can be used in an application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * The crypto auth algorithm string identifiers.
 * They can be used in an application command line.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};

/**
 * The crypto AEAD algorithm string identifiers.
 * They can be used in an application command line.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
};

/**
 * The crypto AEAD operation string identifiers.
 * They can be used in an application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
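
/*
 * Usage sketch for the three lookup helpers above (illustrative only;
 * the algorithm string would typically come from a command line):
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") < 0)
 *		... unknown or unsupported algorithm string ...
 */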

/**
 * The crypto auth operation string identifiers.
 * They can be used in an application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
				capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
				capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}
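
/*
 * Illustration of the range semantics checked above (the values are
 * hypothetical): a range of {.min = 32, .max = 64, .increment = 8}
 * accepts the sizes 32, 40, 48, 56 and 64, while an increment of 0
 * means only the min/max bounds are checked (by convention such a
 * range holds a single fixed value, with min == max).
 */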

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->auth.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->aead.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
		return -1;

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		return -1;

	return 0;
}
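
/*
 * Usage sketch tying the capability lookup and check helpers together
 * (device id 0 and the 16-byte key/IV sizes are hypothetical):
 *
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *	struct rte_cryptodev_sym_capability_idx cap_idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *
 *	cap = rte_cryptodev_sym_capability_get(0, &cap_idx);
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) != 0)
 *		... AES-CBC with 128-bit key and IV is not supported ...
 */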

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER:
		return "MBUF_SCATTER_GATHER";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	default:
		return NULL;
	}
}
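
/*
 * Usage sketch (illustrative): printing a device's feature names one
 * flag at a time; dev_info is assumed to have been filled in by
 * rte_cryptodev_info_get().
 *
 *	uint64_t flag;
 *	const char *name;
 *
 *	for (flag = 1; flag != 0; flag <<= 1) {
 *		name = rte_cryptodev_get_feature_name(
 *				dev_info.feature_flags & flag);
 *		if (name != NULL)
 *			printf("%s\n", name);
 *	}
 */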

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &rte_cryptodev_globals->devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < rte_cryptodev_globals->max_devs; i++) {
		dev = &rte_cryptodev_globals->devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

unsigned int
rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (dev_id >= rte_cryptodev_globals->nb_devs)
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < rte_cryptodev_globals->nb_devs; i++)
		if ((strcmp(rte_cryptodev_globals->devs[i].data->name, name)
				== 0) &&
				(rte_cryptodev_globals->devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return rte_cryptodev_globals->nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < rte_cryptodev_globals->max_devs; i++)
		if (rte_cryptodev_globals->devs[i].driver_id == driver_id &&
			rte_cryptodev_globals->devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = rte_cryptodev_globals->devs;
	uint8_t max_devs = rte_cryptodev_globals->max_devs;

	for (i = 0; i < max_devs && count < nb_devices; i++) {

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name));

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}
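
/*
 * Usage sketch (illustrative; the driver name is a placeholder for
 * whatever PMD the application targets):
 *
 *	uint8_t devs[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t nb;
 *
 *	nb = rte_cryptodev_devices_get("crypto_aesni_mb", devs,
 *			RTE_DIM(devs));
 *	... devs[0..nb-1] now hold the matching device ids ...
 */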

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY)
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}

static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
	} else {
		mz = rte_memzone_lookup(mz_name);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data *cryptodev_data =
				cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, &cryptodev_data,
				socket_id);

		if (retval < 0 || cryptodev_data == NULL)
			return NULL;

		cryptodev->data = cryptodev_data;

		snprintf(cryptodev->data->name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s", name);

		cryptodev->data->dev_id = dev_id;
		cryptodev->data->socket_id = socket_id;
		cryptodev->data->dev_started = 0;

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;

	if (cryptodev == NULL)
		return -EINVAL;

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(cryptodev->data->dev_id);
		if (ret < 0)
			return ret;
	}

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > dev_info.max_nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			CDEV_LOG_ERR("failed to realloc qp meta data,"
						" nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}

		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_queue_pair_start(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_start, -ENOTSUP);

	return (*dev->dev_ops->queue_pair_start)(dev, queue_pair_id);
}

int
rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_stop, -ENOTSUP);

	return (*dev->dev_ops->queue_pair_stop)(dev, queue_pair_id);
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	return (*dev->dev_ops->dev_configure)(dev, config);
}

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
		struct rte_mempool *session_pool)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id, session_pool);
}
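
/*
 * Typical bring-up sequence, shown as a sketch (the queue depth,
 * socket and session_pool below are placeholders chosen by the
 * caller, not values this library mandates):
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = SOCKET_ID_ANY,
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *	    rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			SOCKET_ID_ANY, session_pool) < 0 ||
 *	    rte_cryptodev_start(dev_id) < 0)
 *		... bring-up failed ...
 */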

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (dev_id >= cryptodev_globals.nb_devs) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
}

int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}
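
/*
 * Usage sketch (illustrative): registering a handler for error events.
 * The handler name and body are hypothetical; the signature follows
 * rte_cryptodev_cb_fn.
 *
 *	static void
 *	event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg)
 *	{
 *		... react to the event ...
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id,
 *			RTE_CRYPTODEV_EVENT_ERROR, event_cb, NULL);
 */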

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}

int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint8_t index;
	int ret;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	index = dev->driver_id;

	if (sess->sess_private_data[index] == NULL) {
		ret = dev->dev_ops->session_configure(dev, xforms, sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	return 0;
}

struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	/* Clear device session pointer */
	memset(sess, 0, (sizeof(void *) * nb_drivers));

	return sess;
}
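
/*
 * Usage sketch combining the two-step session API above. The xform
 * and the mempools are placeholders prepared by the caller (the same
 * mempool may serve both calls, depending on the application):
 *
 *	struct rte_cryptodev_sym_session *sess;
 *
 *	sess = rte_cryptodev_sym_session_create(session_mp);
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *			session_mp) < 0)
 *		... session setup failed ...
 */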

int
rte_cryptodev_queue_pair_attach_sym_session(uint8_t dev_id, uint16_t qp_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	/* The API is optional; return no error if the driver lacks support */
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_attach_session, 0);

	void *sess_priv = get_session_private_data(sess, dev->driver_id);

	if (dev->dev_ops->qp_attach_session(dev, qp_id, sess_priv)) {
		CDEV_LOG_ERR("dev_id %d failed to attach qp: %d with session",
				dev_id, qp_id);
		return -EPERM;
	}

	return 0;
}

int
rte_cryptodev_queue_pair_detach_sym_session(uint8_t dev_id, uint16_t qp_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	/* The API is optional; return no error if the driver lacks support */
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_detach_session, 0);

	void *sess_priv = get_session_private_data(sess, dev->driver_id);

	if (dev->dev_ops->qp_detach_session(dev, qp_id, sess_priv)) {
		CDEV_LOG_ERR("dev_id %d failed to detach qp: %d from session",
				dev_id, qp_id);
		return -EPERM;
	}

	return 0;
}

int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	dev->dev_ops->session_clear(dev, sess);

	return 0;
}

int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
{
	uint8_t i;
	void *sess_priv;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < nb_drivers; i++) {
		sess_priv = get_session_private_data(sess, i);
		if (sess_priv != NULL)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	return 0;
}

unsigned int
rte_cryptodev_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data
	 * of all registered drivers
	 */
	return (sizeof(void *) * nb_drivers);
}

unsigned int
rte_cryptodev_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int header_size = sizeof(void *) * nb_drivers;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->session_get_size)(dev);

	/*
	 * If size is less than session header size,
	 * return the latter, as this guarantees that
	 * sessionless operations will work
	 */
	if (priv_sess_size < header_size)
		return header_size;

	return priv_sess_size;
}
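
/*
 * Sketch of how these sizes are typically combined when sizing a
 * session mempool (the pool name, element count and cache size are
 * placeholders):
 *
 *	unsigned int elt_size =
 *		rte_cryptodev_get_header_session_size() +
 *		rte_cryptodev_get_private_session_size(dev_id);
 *
 *	struct rte_mempool *mp = rte_mempool_create("sess_mp", 1024,
 *			elt_size, 32, 0, NULL, NULL, NULL, NULL,
 *			SOCKET_ID_ANY, 0);
 */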

/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}

struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
		int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned elt_size = sizeof(struct rte_crypto_op) +
			sizeof(struct rte_crypto_sym_op) +
			priv_size;

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			mp = NULL;
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;
	priv->type = type;

	return mp;
}
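
/*
 * Usage sketch (the pool name, element count, cache size and socket
 * are placeholders):
 *
 *	struct rte_mempool *op_mp = rte_crypto_op_pool_create("op_pool",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 0,
 *			SOCKET_ID_ANY);
 *	struct rte_crypto_op *op =
 *		rte_crypto_op_alloc(op_mp, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 */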

int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	uint32_t i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}

TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);

int
rte_cryptodev_driver_id_get(const char *name)
{
	struct cryptodev_driver *driver;
	const char *driver_name;

	if (name == NULL) {
		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
		return -1;
	}

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		driver_name = driver->driver->name;
		if (strncmp(driver_name, name, strlen(driver_name)) == 0)
			return driver->id;
	}
	return -1;
}

const char *
rte_cryptodev_name_get(uint8_t dev_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL)
		return NULL;

	return dev->data->name;
}

const char *
rte_cryptodev_driver_name_get(uint8_t driver_id)
{
	struct cryptodev_driver *driver;

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
		if (driver->id == driver_id)
			return driver->driver->name;
	return NULL;
}

uint8_t
rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
		const struct rte_driver *drv)
{
	crypto_drv->driver = drv;
	crypto_drv->id = nb_drivers;

	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);

	return nb_drivers++;
}