/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "rte_cryptodev_pmd.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0
};

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the callback's parameters, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * String identifiers for the crypto cipher algorithms.
 * These strings can be used in an application command line.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};

/**
 * String identifiers for the crypto cipher operations.
 * These strings can be used in an application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
	[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * String identifiers for the crypto authentication algorithms.
 * These strings can be used in an application command line.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};

/**
 * String identifiers for the crypto AEAD algorithms.
 * These strings can be used in an application command line.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
};

/**
 * String identifiers for the crypto AEAD operations.
 * These strings can be used in an application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * String identifiers for the asymmetric crypto transforms.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
};

/**
 * String identifiers for the asymmetric crypto operations.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]	= "priv_key_generate",
	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE]	= "pub_key_generate",
	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE]	= "sharedsecret_compute",
};

/**
 * The private data structure stored in a session mempool's private data area.
 */
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t nb_drivers;
	/**< number of elements in sess_data array */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
			rte_crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

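/*
 * Illustrative usage sketch (not part of the library): an application can
 * map command-line strings to algorithm enums with the helpers above. The
 * variable names below are hypothetical.
 *
 *	enum rte_crypto_cipher_algorithm cipher_algo;
 *	enum rte_crypto_auth_algorithm auth_algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&cipher_algo, "aes-cbc") < 0)
 *		printf("Unknown cipher algorithm\n");
 *	if (rte_cryptodev_get_auth_algo_enum(&auth_algo, "sha1-hmac") < 0)
 *		printf("Unknown auth algorithm\n");
 */
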
/**
 * String identifiers for the crypto authentication operations.
 * These strings can be used in an application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
	[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}

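/*
 * Illustrative usage sketch (not part of the library): querying whether a
 * device supports AES-GCM before creating a session. dev_id is assumed to
 * be a valid device identifier.
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &idx);
 *
 *	if (cap == NULL)
 *		printf("AES-GCM not supported by device %u\n", dev_id);
 */
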
static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type)
			return &capability->asym.xform_capa;
	}
	return NULL;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->auth.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->aead.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
		return -1;

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		return -1;

	return 0;
}

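/*
 * Illustrative usage sketch (not part of the library): once a capability
 * has been retrieved, the check helpers validate concrete parameter sizes
 * against the advertised ranges. cap is assumed to come from
 * rte_cryptodev_sym_capability_get() for an AEAD transform.
 *
 *	uint16_t key_sz = 16, digest_sz = 16, aad_sz = 12, iv_sz = 12;
 *
 *	if (rte_cryptodev_sym_capability_check_aead(cap, key_sz,
 *			digest_sz, aad_sz, iv_sz) < 0)
 *		printf("Requested AEAD parameters not supported\n");
 */
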
int
rte_cryptodev_asym_xform_capability_check_optype(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type)
{
	if (capability->op_types & (1 << op_type))
		return 1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	uint16_t modlen)
{
	/* no need to check the limits if min or max is 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min)
			return -1;
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max)
			return -1;
	}

	/* in any case, check that modlen is a multiple of the increment */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			return -1;
	}

	return 0;
}

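/*
 * Illustrative usage sketch (not part of the library): checking that a
 * device can perform RSA signing with a given modulus length (modlen is in
 * bytes, so 256 corresponds to a 2048-bit modulus). dev_id is assumed
 * valid.
 *
 *	struct rte_cryptodev_asym_capability_idx idx = {
 *		.type = RTE_CRYPTO_ASYM_XFORM_RSA,
 *	};
 *	const struct rte_cryptodev_asymmetric_xform_capability *cap =
 *		rte_cryptodev_asym_capability_get(dev_id, &idx);
 *
 *	if (cap == NULL ||
 *	    !rte_cryptodev_asym_xform_capability_check_optype(cap,
 *			RTE_CRYPTO_ASYM_OP_SIGN) ||
 *	    rte_cryptodev_asym_xform_capability_check_modlen(cap, 256) < 0)
 *		printf("RSA sign with 2048-bit modulus not supported\n");
 */
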
const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	default:
		return NULL;
	}
}

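/*
 * Illustrative usage sketch (not part of the library): feature flags are a
 * bitmask, so an application can walk the bits and print the name of each
 * advertised feature. info is assumed to have been filled in by
 * rte_cryptodev_info_get().
 *
 *	uint64_t flag;
 *
 *	for (flag = 1; flag != 0; flag <<= 1) {
 *		const char *name = rte_cryptodev_get_feature_name(
 *				info.feature_flags & flag);
 *		if (name != NULL)
 *			printf("%s\n", name);
 *	}
 */
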
struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}

unsigned int
rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (!rte_cryptodev_is_valid_device_data(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;
	}

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}

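/*
 * Illustrative usage sketch (not part of the library): collecting the
 * identifiers of all devices bound to a given PMD. The driver name below
 * is only an example.
 *
 *	uint8_t dev_ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t nb, i;
 *
 *	nb = rte_cryptodev_devices_get("crypto_aesni_mb", dev_ids,
 *			RTE_CRYPTO_MAX_DEVS);
 *	for (i = 0; i < nb; i++)
 *		printf("device %u uses crypto_aesni_mb\n", dev_ids[i]);
 */
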
void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY)
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}

static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return rte_memzone_free(mz);

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
		}

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			CDEV_LOG_ERR("failed to realloc qp meta data,"
						" nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}

		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	return (*dev->dev_ops->dev_configure)(dev, config);
}

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL");
		return -EINVAL;
	}

	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
		CDEV_LOG_ERR("Invalid mempools");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
		uint32_t obj_size = qp_conf->mp_session->elt_size;
		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
		struct rte_cryptodev_sym_session s = {0};

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}

		s.nb_drivers = pool_priv->nb_drivers;
		s.user_data_sz = pool_priv->user_data_sz;

		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
			obj_size) || (s.nb_drivers <= dev->driver_id) ||
			rte_cryptodev_sym_get_private_session_size(dev_id) >
				obj_priv_size) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}

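/*
 * Illustrative usage sketch (not part of the library): the expected setup
 * order is configure, then queue pair setup, then start. sess_mp and
 * sess_priv_mp are assumed to be mempools created beforehand, e.g. with
 * rte_cryptodev_sym_session_pool_create(); the descriptor count is only an
 * example.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *	    rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_socket_id()) < 0 ||
 *	    rte_cryptodev_start(dev_id) < 0)
 *		printf("failed to bring up crypto device %u\n", dev_id);
 */
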
int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}

int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}

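/*
 * Illustrative usage sketch (not part of the library): registering an
 * application handler for device error events. The handler name is
 * hypothetical.
 *
 *	static void
 *	crypto_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg)
 *	{
 *		printf("event %d on crypto device %u\n", event, dev_id);
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			crypto_event_cb, NULL);
 */
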
int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
			dev_id);
	uint8_t index;
	int ret;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	if (mp->elt_size < sess_priv_sz)
		return -EINVAL;

	index = dev->driver_id;
	if (index >= sess->nb_drivers)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure,
			-ENOTSUP);

	if (sess->sess_data[index].refcnt == 0) {
		ret = dev->dev_ops->sym_session_configure(dev, xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	sess->sess_data[index].refcnt++;
	return 0;
}

int
rte_cryptodev_asym_session_init(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess,
		struct rte_crypto_asym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint8_t index;
	int ret;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	index = dev->driver_id;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
				-ENOTSUP);

	if (sess->sess_private_data[index] == NULL) {
		ret = dev->dev_ops->asym_session_configure(dev,
							xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	return 0;
}

struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
	if (obj_sz > elt_size)
		CDEV_LOG_INFO("elt_size %u is expanded to %u", elt_size,
				obj_sz);
	else
		obj_sz = elt_size;

	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	pool_priv->nb_drivers = nb_drivers;
	pool_priv->user_data_sz = user_data_size;

	return mp;
}

static unsigned int
rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
{
	return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
			sess->user_data_sz;
}

struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp) {
		CDEV_LOG_ERR("Invalid mempool");
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv)) {
		CDEV_LOG_ERR("Invalid mempool");
		return NULL;
	}

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	sess->nb_drivers = pool_priv->nb_drivers;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->opaque_data = 0;

	/* Clear device session pointer.
	 * Include the flag indicating presence of user data
	 */
	memset(sess->sess_data, 0,
			rte_cryptodev_sym_session_data_size(sess));

	return sess;
}

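/*
 * Illustrative usage sketch (not part of the library): the full symmetric
 * session life cycle. sess_mp and sess_priv_mp are assumed to be mempools
 * created with rte_cryptodev_sym_session_pool_create(), and xform a
 * previously initialised rte_crypto_sym_xform chain.
 *
 *	struct rte_cryptodev_sym_session *sess;
 *
 *	sess = rte_cryptodev_sym_session_create(sess_mp);
 *	if (sess != NULL &&
 *	    rte_cryptodev_sym_session_init(dev_id, sess, xform,
 *			sess_priv_mp) == 0) {
 *		... enqueue/dequeue operations using the session ...
 *		rte_cryptodev_sym_session_clear(dev_id, sess);
 *	}
 *	rte_cryptodev_sym_session_free(sess);
 */
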
struct rte_cryptodev_asym_session *
rte_cryptodev_asym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_asym_session *sess;

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	/* Clear device session pointer.
	 * Include the flag indicating presence of private data
	 */
	memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));

	return sess;
}

int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;
	uint8_t driver_id;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	driver_id = dev->driver_id;
	if (sess->sess_data[driver_id].refcnt == 0)
		return 0;
	if (--sess->sess_data[driver_id].refcnt != 0)
		return -EBUSY;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);

	dev->dev_ops->sym_session_clear(dev, sess);

	return 0;
}

int
rte_cryptodev_asym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess)
{
	struct rte_cryptodev *dev;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);

	dev->dev_ops->asym_session_clear(dev, sess);

	return 0;
}

int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
{
	uint8_t i;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < sess->nb_drivers; i++) {
		if (sess->sess_data[i].refcnt != 0)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	return 0;
}

int
rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
{
	uint8_t i;
	void *sess_priv;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < nb_drivers; i++) {
		sess_priv = get_asym_session_private_data(sess, i);
		if (sess_priv != NULL)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	return 0;
}

unsigned int
rte_cryptodev_sym_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data of all registered
	 * drivers and all the information necessary to safely clear or
	 * free a session.
	 */
	struct rte_cryptodev_sym_session s = {0};

	s.nb_drivers = nb_drivers;

	return (unsigned int)(sizeof(s) +
			rte_cryptodev_sym_session_data_size(&s));
}

unsigned int
rte_cryptodev_sym_get_existing_header_session_size(
		struct rte_cryptodev_sym_session *sess)
{
	if (!sess)
		return 0;
	else
		return (unsigned int)(sizeof(*sess) +
				rte_cryptodev_sym_session_data_size(sess));
}

unsigned int
rte_cryptodev_asym_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data
	 * of all registered drivers, and a flag which
	 * indicates presence of private data
	 */
	return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
}

unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);

	return priv_sess_size;
}

unsigned int
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int header_size = sizeof(void *) * nb_drivers;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->asym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
	if (priv_sess_size < header_size)
		return header_size;

	return priv_sess_size;
}

int
rte_cryptodev_sym_session_set_user_data(
					struct rte_cryptodev_sym_session *sess,
					void *data,
					uint16_t size)
{
	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
	return 0;
}

void *
rte_cryptodev_sym_session_get_user_data(
					struct rte_cryptodev_sym_session *sess)
{
	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	return (void *)(sess->sess_data + sess->nb_drivers);
}

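/*
 * Illustrative usage sketch (not part of the library): storing per-session
 * application state in the user data area. The session pool must have been
 * created with a non-zero user_data_size; the struct below is
 * hypothetical.
 *
 *	struct app_sess_ctx { uint32_t flow_id; } ctx = { .flow_id = 7 };
 *
 *	if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
 *			sizeof(ctx)) == 0) {
 *		struct app_sess_ctx *p =
 *			rte_cryptodev_sym_session_get_user_data(sess);
 *		printf("flow %u\n", p->flow_id);
 *	}
 */
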
/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned int i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}

struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
		int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned elt_size = sizeof(struct rte_crypto_op) +
			priv_size;

	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_sym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_asym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				sizeof(struct rte_crypto_asym_op));
	} else {
		CDEV_LOG_ERR("Invalid op_type");
		return NULL;
	}

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			mp = NULL;
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;
	priv->type = type;

	return mp;
}

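/*
 * Illustrative usage sketch (not part of the library): creating a pool of
 * symmetric crypto operations and allocating a burst from it. The pool
 * name and sizes are only examples.
 *
 *	struct rte_mempool *op_mp;
 *	struct rte_crypto_op *ops[32];
 *
 *	op_mp = rte_crypto_op_pool_create("crypto_op_pool",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 0,
 *			rte_socket_id());
 *	if (op_mp != NULL &&
 *	    rte_crypto_op_bulk_alloc(op_mp, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			ops, 32) == 32) {
 *		... fill in ops[] and enqueue them ...
 *	}
 */
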
int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	uint32_t i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}

TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);

int
rte_cryptodev_driver_id_get(const char *name)
{
	struct cryptodev_driver *driver;
	const char *driver_name;

	if (name == NULL) {
		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
		return -1;
	}

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		driver_name = driver->driver->name;
		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
			return driver->id;
	}
	return -1;
}

const char *
rte_cryptodev_name_get(uint8_t dev_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL)
		return NULL;

	return dev->data->name;
}

const char *
rte_cryptodev_driver_name_get(uint8_t driver_id)
{
	struct cryptodev_driver *driver;

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
		if (driver->id == driver_id)
			return driver->driver->name;
	return NULL;
}

uint8_t
rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
		const struct rte_driver *drv)
{
	crypto_drv->driver = drv;
	crypto_drv->id = nb_drivers;

	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);

	return nb_drivers++;
}