1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2017 Intel Corporation
3  */
4 
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16 
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_dev.h>
21 #include <rte_interrupts.h>
22 #include <rte_memory.h>
23 #include <rte_memcpy.h>
24 #include <rte_memzone.h>
25 #include <rte_launch.h>
26 #include <rte_tailq.h>
27 #include <rte_eal.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_common.h>
33 #include <rte_mempool.h>
34 #include <rte_malloc.h>
35 #include <rte_mbuf.h>
36 #include <rte_errno.h>
37 #include <rte_spinlock.h>
38 #include <rte_string_fns.h>
39 
40 #include "rte_crypto.h"
41 #include "rte_cryptodev.h"
42 #include "rte_cryptodev_pmd.h"
43 
/* Count of crypto drivers known to the library; presumably incremented by
 * the driver-registration path — not modified in this part of the file.
 */
static uint8_t nb_drivers;

/* Statically allocated table holding every possible crypto device slot. */
static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

/* Public alias of the device table, exposed for fast-path inline helpers. */
struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

/* Library-wide bookkeeping: device table, per-device shared data pointers
 * and the number of currently attached devices.
 */
static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0
};

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
58 
59 
60 /**
61  * The user application callback description.
62  *
63  * It contains callback address to be registered by user application,
64  * the pointer to the parameters for callback, and the event type.
65  */
66 struct rte_cryptodev_callback {
67 	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
68 	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
69 	void *cb_arg;				/**< Parameter for callback */
70 	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
71 	uint32_t active;			/**< Callback is executing */
72 };
73 
74 /**
75  * The crypto cipher algorithm strings identifiers.
76  * It could be used in application command line.
77  */
78 const char *
79 rte_crypto_cipher_algorithm_strings[] = {
80 	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
81 	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
82 	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",
83 
84 	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
85 	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
86 	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
87 	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
88 	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
89 	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",
90 
91 	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",
92 
93 	[RTE_CRYPTO_CIPHER_DES_CBC]     = "des-cbc",
94 	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",
95 
96 	[RTE_CRYPTO_CIPHER_NULL]	= "null",
97 
98 	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
99 	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
100 	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
101 };
102 
103 /**
104  * The crypto cipher operation strings identifiers.
105  * It could be used in application command line.
106  */
107 const char *
108 rte_crypto_cipher_operation_strings[] = {
109 		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
110 		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
111 };
112 
113 /**
114  * The crypto auth algorithm strings identifiers.
115  * It could be used in application command line.
116  */
117 const char *
118 rte_crypto_auth_algorithm_strings[] = {
119 	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
120 	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
121 	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
122 	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",
123 
124 	[RTE_CRYPTO_AUTH_MD5]		= "md5",
125 	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",
126 
127 	[RTE_CRYPTO_AUTH_NULL]		= "null",
128 
129 	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
130 	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",
131 
132 	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
133 	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
134 	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
135 	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
136 	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
137 	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
138 	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
139 	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",
140 
141 	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
142 	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
143 	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
144 };
145 
146 /**
147  * The crypto AEAD algorithm strings identifiers.
148  * It could be used in application command line.
149  */
150 const char *
151 rte_crypto_aead_algorithm_strings[] = {
152 	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
153 	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
154 };
155 
156 /**
157  * The crypto AEAD operation strings identifiers.
158  * It could be used in application command line.
159  */
160 const char *
161 rte_crypto_aead_operation_strings[] = {
162 	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
163 	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
164 };
165 
166 /**
167  * Asymmetric crypto transform operation strings identifiers.
168  */
169 const char *rte_crypto_asym_xform_strings[] = {
170 	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
171 	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
172 	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
173 	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
174 	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
175 	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
176 };
177 
178 /**
179  * Asymmetric crypto operation strings identifiers.
180  */
181 const char *rte_crypto_asym_op_strings[] = {
182 	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
183 	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
184 	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
185 	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
186 	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]	= "priv_key_generate",
187 	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
188 	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
189 };
190 
191 /**
192  * The private data structure stored in the session mempool private data.
193  */
194 struct rte_cryptodev_sym_session_pool_private_data {
195 	uint16_t nb_drivers;
196 	/**< number of elements in sess_data array */
197 	uint16_t user_data_sz;
198 	/**< session user data will be placed after sess_data */
199 };
200 
201 int
202 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
203 		const char *algo_string)
204 {
205 	unsigned int i;
206 
207 	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
208 		if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
209 			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
210 			return 0;
211 		}
212 	}
213 
214 	/* Invalid string */
215 	return -1;
216 }
217 
218 int
219 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
220 		const char *algo_string)
221 {
222 	unsigned int i;
223 
224 	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
225 		if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
226 			*algo_enum = (enum rte_crypto_auth_algorithm) i;
227 			return 0;
228 		}
229 	}
230 
231 	/* Invalid string */
232 	return -1;
233 }
234 
235 int
236 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
237 		const char *algo_string)
238 {
239 	unsigned int i;
240 
241 	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
242 		if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
243 			*algo_enum = (enum rte_crypto_aead_algorithm) i;
244 			return 0;
245 		}
246 	}
247 
248 	/* Invalid string */
249 	return -1;
250 }
251 
252 int
253 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
254 		const char *xform_string)
255 {
256 	unsigned int i;
257 
258 	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
259 		if (strcmp(xform_string,
260 			rte_crypto_asym_xform_strings[i]) == 0) {
261 			*xform_enum = (enum rte_crypto_asym_xform_type) i;
262 			return 0;
263 		}
264 	}
265 
266 	/* Invalid string */
267 	return -1;
268 }
269 
270 /**
271  * The crypto auth operation strings identifiers.
272  * It could be used in application command line.
273  */
274 const char *
275 rte_crypto_auth_operation_strings[] = {
276 		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
277 		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
278 };
279 
280 const struct rte_cryptodev_symmetric_capability *
281 rte_cryptodev_sym_capability_get(uint8_t dev_id,
282 		const struct rte_cryptodev_sym_capability_idx *idx)
283 {
284 	const struct rte_cryptodev_capabilities *capability;
285 	struct rte_cryptodev_info dev_info;
286 	int i = 0;
287 
288 	rte_cryptodev_info_get(dev_id, &dev_info);
289 
290 	while ((capability = &dev_info.capabilities[i++])->op !=
291 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
292 		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
293 			continue;
294 
295 		if (capability->sym.xform_type != idx->type)
296 			continue;
297 
298 		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
299 			capability->sym.auth.algo == idx->algo.auth)
300 			return &capability->sym;
301 
302 		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
303 			capability->sym.cipher.algo == idx->algo.cipher)
304 			return &capability->sym;
305 
306 		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
307 				capability->sym.aead.algo == idx->algo.aead)
308 			return &capability->sym;
309 	}
310 
311 	return NULL;
312 
313 }
314 
315 static int
316 param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
317 {
318 	unsigned int next_size;
319 
320 	/* Check lower/upper bounds */
321 	if (size < range->min)
322 		return -1;
323 
324 	if (size > range->max)
325 		return -1;
326 
327 	/* If range is actually only one value, size is correct */
328 	if (range->increment == 0)
329 		return 0;
330 
331 	/* Check if value is one of the supported sizes */
332 	for (next_size = range->min; next_size <= range->max;
333 			next_size += range->increment)
334 		if (size == next_size)
335 			return 0;
336 
337 	return -1;
338 }
339 
340 const struct rte_cryptodev_asymmetric_xform_capability *
341 rte_cryptodev_asym_capability_get(uint8_t dev_id,
342 		const struct rte_cryptodev_asym_capability_idx *idx)
343 {
344 	const struct rte_cryptodev_capabilities *capability;
345 	struct rte_cryptodev_info dev_info;
346 	unsigned int i = 0;
347 
348 	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
349 	rte_cryptodev_info_get(dev_id, &dev_info);
350 
351 	while ((capability = &dev_info.capabilities[i++])->op !=
352 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
353 		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
354 			continue;
355 
356 		if (capability->asym.xform_capa.xform_type == idx->type)
357 			return &capability->asym.xform_capa;
358 	}
359 	return NULL;
360 };
361 
362 int
363 rte_cryptodev_sym_capability_check_cipher(
364 		const struct rte_cryptodev_symmetric_capability *capability,
365 		uint16_t key_size, uint16_t iv_size)
366 {
367 	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
368 		return -1;
369 
370 	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
371 		return -1;
372 
373 	return 0;
374 }
375 
376 int
377 rte_cryptodev_sym_capability_check_auth(
378 		const struct rte_cryptodev_symmetric_capability *capability,
379 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
380 {
381 	if (param_range_check(key_size, &capability->auth.key_size) != 0)
382 		return -1;
383 
384 	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
385 		return -1;
386 
387 	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
388 		return -1;
389 
390 	return 0;
391 }
392 
393 int
394 rte_cryptodev_sym_capability_check_aead(
395 		const struct rte_cryptodev_symmetric_capability *capability,
396 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
397 		uint16_t iv_size)
398 {
399 	if (param_range_check(key_size, &capability->aead.key_size) != 0)
400 		return -1;
401 
402 	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
403 		return -1;
404 
405 	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
406 		return -1;
407 
408 	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
409 		return -1;
410 
411 	return 0;
412 }
413 int
414 rte_cryptodev_asym_xform_capability_check_optype(
415 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
416 	enum rte_crypto_asym_op_type op_type)
417 {
418 	if (capability->op_types & (1 << op_type))
419 		return 1;
420 
421 	return 0;
422 }
423 
424 int
425 rte_cryptodev_asym_xform_capability_check_modlen(
426 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
427 	uint16_t modlen)
428 {
429 	/* no need to check for limits, if min or max = 0 */
430 	if (capability->modlen.min != 0) {
431 		if (modlen < capability->modlen.min)
432 			return -1;
433 	}
434 
435 	if (capability->modlen.max != 0) {
436 		if (modlen > capability->modlen.max)
437 			return -1;
438 	}
439 
440 	/* in any case, check if given modlen is module increment */
441 	if (capability->modlen.increment != 0) {
442 		if (modlen % (capability->modlen.increment))
443 			return -1;
444 	}
445 
446 	return 0;
447 }
448 
449 
450 const char *
451 rte_cryptodev_get_feature_name(uint64_t flag)
452 {
453 	switch (flag) {
454 	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
455 		return "SYMMETRIC_CRYPTO";
456 	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
457 		return "ASYMMETRIC_CRYPTO";
458 	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
459 		return "SYM_OPERATION_CHAINING";
460 	case RTE_CRYPTODEV_FF_CPU_SSE:
461 		return "CPU_SSE";
462 	case RTE_CRYPTODEV_FF_CPU_AVX:
463 		return "CPU_AVX";
464 	case RTE_CRYPTODEV_FF_CPU_AVX2:
465 		return "CPU_AVX2";
466 	case RTE_CRYPTODEV_FF_CPU_AVX512:
467 		return "CPU_AVX512";
468 	case RTE_CRYPTODEV_FF_CPU_AESNI:
469 		return "CPU_AESNI";
470 	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
471 		return "HW_ACCELERATED";
472 	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
473 		return "IN_PLACE_SGL";
474 	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
475 		return "OOP_SGL_IN_SGL_OUT";
476 	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
477 		return "OOP_SGL_IN_LB_OUT";
478 	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
479 		return "OOP_LB_IN_SGL_OUT";
480 	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
481 		return "OOP_LB_IN_LB_OUT";
482 	case RTE_CRYPTODEV_FF_CPU_NEON:
483 		return "CPU_NEON";
484 	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
485 		return "CPU_ARM_CE";
486 	case RTE_CRYPTODEV_FF_SECURITY:
487 		return "SECURITY_PROTOCOL";
488 	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
489 		return "RSA_PRIV_OP_KEY_EXP";
490 	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
491 		return "RSA_PRIV_OP_KEY_QT";
492 	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
493 		return "DIGEST_ENCRYPTED";
494 	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
495 		return "ASYM_SESSIONLESS";
496 	default:
497 		return NULL;
498 	}
499 }
500 
501 struct rte_cryptodev *
502 rte_cryptodev_pmd_get_dev(uint8_t dev_id)
503 {
504 	return &cryptodev_globals.devs[dev_id];
505 }
506 
507 struct rte_cryptodev *
508 rte_cryptodev_pmd_get_named_dev(const char *name)
509 {
510 	struct rte_cryptodev *dev;
511 	unsigned int i;
512 
513 	if (name == NULL)
514 		return NULL;
515 
516 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
517 		dev = &cryptodev_globals.devs[i];
518 
519 		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
520 				(strcmp(dev->data->name, name) == 0))
521 			return dev;
522 	}
523 
524 	return NULL;
525 }
526 
527 static inline uint8_t
528 rte_cryptodev_is_valid_device_data(uint8_t dev_id)
529 {
530 	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
531 			rte_crypto_devices[dev_id].data == NULL)
532 		return 0;
533 
534 	return 1;
535 }
536 
537 unsigned int
538 rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
539 {
540 	struct rte_cryptodev *dev = NULL;
541 
542 	if (!rte_cryptodev_is_valid_device_data(dev_id))
543 		return 0;
544 
545 	dev = rte_cryptodev_pmd_get_dev(dev_id);
546 	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
547 		return 0;
548 	else
549 		return 1;
550 }
551 
552 
553 int
554 rte_cryptodev_get_dev_id(const char *name)
555 {
556 	unsigned i;
557 
558 	if (name == NULL)
559 		return -1;
560 
561 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
562 		if (!rte_cryptodev_is_valid_device_data(i))
563 			continue;
564 		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
565 				== 0) &&
566 				(cryptodev_globals.devs[i].attached ==
567 						RTE_CRYPTODEV_ATTACHED))
568 			return i;
569 	}
570 
571 	return -1;
572 }
573 
574 uint8_t
575 rte_cryptodev_count(void)
576 {
577 	return cryptodev_globals.nb_devs;
578 }
579 
580 uint8_t
581 rte_cryptodev_device_count_by_driver(uint8_t driver_id)
582 {
583 	uint8_t i, dev_count = 0;
584 
585 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
586 		if (cryptodev_globals.devs[i].driver_id == driver_id &&
587 			cryptodev_globals.devs[i].attached ==
588 					RTE_CRYPTODEV_ATTACHED)
589 			dev_count++;
590 
591 	return dev_count;
592 }
593 
594 uint8_t
595 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
596 	uint8_t nb_devices)
597 {
598 	uint8_t i, count = 0;
599 	struct rte_cryptodev *devs = cryptodev_globals.devs;
600 
601 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
602 		if (!rte_cryptodev_is_valid_device_data(i))
603 			continue;
604 
605 		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
606 			int cmp;
607 
608 			cmp = strncmp(devs[i].device->driver->name,
609 					driver_name,
610 					strlen(driver_name) + 1);
611 
612 			if (cmp == 0)
613 				devices[count++] = devs[i].data->dev_id;
614 		}
615 	}
616 
617 	return count;
618 }
619 
620 void *
621 rte_cryptodev_get_sec_ctx(uint8_t dev_id)
622 {
623 	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
624 			(rte_crypto_devices[dev_id].feature_flags &
625 			RTE_CRYPTODEV_FF_SECURITY))
626 		return rte_crypto_devices[dev_id].security_ctx;
627 
628 	return NULL;
629 }
630 
631 int
632 rte_cryptodev_socket_id(uint8_t dev_id)
633 {
634 	struct rte_cryptodev *dev;
635 
636 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
637 		return -1;
638 
639 	dev = rte_cryptodev_pmd_get_dev(dev_id);
640 
641 	return dev->data->socket_id;
642 }
643 
644 static inline int
645 rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
646 		int socket_id)
647 {
648 	char mz_name[RTE_MEMZONE_NAMESIZE];
649 	const struct rte_memzone *mz;
650 	int n;
651 
652 	/* generate memzone name */
653 	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
654 	if (n >= (int)sizeof(mz_name))
655 		return -EINVAL;
656 
657 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
658 		mz = rte_memzone_reserve(mz_name,
659 				sizeof(struct rte_cryptodev_data),
660 				socket_id, 0);
661 	} else
662 		mz = rte_memzone_lookup(mz_name);
663 
664 	if (mz == NULL)
665 		return -ENOMEM;
666 
667 	*data = mz->addr;
668 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
669 		memset(*data, 0, sizeof(struct rte_cryptodev_data));
670 
671 	return 0;
672 }
673 
674 static inline int
675 rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
676 {
677 	char mz_name[RTE_MEMZONE_NAMESIZE];
678 	const struct rte_memzone *mz;
679 	int n;
680 
681 	/* generate memzone name */
682 	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
683 	if (n >= (int)sizeof(mz_name))
684 		return -EINVAL;
685 
686 	mz = rte_memzone_lookup(mz_name);
687 	if (mz == NULL)
688 		return -ENOMEM;
689 
690 	RTE_ASSERT(*data == mz->addr);
691 	*data = NULL;
692 
693 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
694 		return rte_memzone_free(mz);
695 
696 	return 0;
697 }
698 
699 static uint8_t
700 rte_cryptodev_find_free_device_index(void)
701 {
702 	uint8_t dev_id;
703 
704 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
705 		if (rte_crypto_devices[dev_id].attached ==
706 				RTE_CRYPTODEV_DETACHED)
707 			return dev_id;
708 	}
709 	return RTE_CRYPTO_MAX_DEVS;
710 }
711 
/*
 * Allocate a device slot for a new crypto device named @name and set up
 * its process-shared data (created by the primary process, attached to
 * by secondaries).
 *
 * Returns the device structure, or NULL when the name is already taken,
 * no free slot remains, or shared data allocation fails.
 */
struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	/* data == NULL means this slot has not been initialized yet in
	 * this process; a secondary attaches to the primary's memzone here.
	 */
	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		/* only the primary process writes the shared fields */
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
		}

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}
763 
764 int
765 rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
766 {
767 	int ret;
768 	uint8_t dev_id;
769 
770 	if (cryptodev == NULL)
771 		return -EINVAL;
772 
773 	dev_id = cryptodev->data->dev_id;
774 
775 	/* Close device only if device operations have been set */
776 	if (cryptodev->dev_ops) {
777 		ret = rte_cryptodev_close(dev_id);
778 		if (ret < 0)
779 			return ret;
780 	}
781 
782 	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
783 	if (ret < 0)
784 		return ret;
785 
786 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
787 	cryptodev_globals.nb_devs--;
788 	return 0;
789 }
790 
791 uint16_t
792 rte_cryptodev_queue_pair_count(uint8_t dev_id)
793 {
794 	struct rte_cryptodev *dev;
795 
796 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
797 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
798 		return 0;
799 	}
800 
801 	dev = &rte_crypto_devices[dev_id];
802 	return dev->data->nb_queue_pairs;
803 }
804 
/*
 * (Re)size the device's queue-pair pointer array to @nb_qpairs entries.
 *
 * First-time configuration allocates a zeroed array; reconfiguration
 * releases any queue pairs beyond the new count, then reallocates the
 * array and zeroes any newly added slots. On allocation failure during
 * reconfiguration the old array remains valid.
 *
 * Returns 0 on success, or -EINVAL/-ENOMEM/-ENOTSUP/PMD error code.
 */
static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queues pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	/* requested count must not exceed what the PMD advertises */
	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		/* release queue pairs that fall beyond the new count */
		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		/* on failure the old array is untouched and still owned by
		 * dev->data->queue_pairs, so nothing leaks here
		 */
		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			CDEV_LOG_ERR("failed to realloc qp meta data,"
						" nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}

		/* zero only the newly appended slots */
		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;

	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}
882 
883 int
884 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
885 {
886 	struct rte_cryptodev *dev;
887 	int diag;
888 
889 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
890 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
891 		return -EINVAL;
892 	}
893 
894 	dev = &rte_crypto_devices[dev_id];
895 
896 	if (dev->data->dev_started) {
897 		CDEV_LOG_ERR(
898 		    "device %d must be stopped to allow configuration", dev_id);
899 		return -EBUSY;
900 	}
901 
902 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
903 
904 	/* Setup new number of queue pairs and reconfigure device. */
905 	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
906 			config->socket_id);
907 	if (diag != 0) {
908 		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
909 				dev_id, diag);
910 		return diag;
911 	}
912 
913 	return (*dev->dev_ops->dev_configure)(dev, config);
914 }
915 
916 
917 int
918 rte_cryptodev_start(uint8_t dev_id)
919 {
920 	struct rte_cryptodev *dev;
921 	int diag;
922 
923 	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
924 
925 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
926 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
927 		return -EINVAL;
928 	}
929 
930 	dev = &rte_crypto_devices[dev_id];
931 
932 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
933 
934 	if (dev->data->dev_started != 0) {
935 		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
936 			dev_id);
937 		return 0;
938 	}
939 
940 	diag = (*dev->dev_ops->dev_start)(dev);
941 	if (diag == 0)
942 		dev->data->dev_started = 1;
943 	else
944 		return diag;
945 
946 	return 0;
947 }
948 
949 void
950 rte_cryptodev_stop(uint8_t dev_id)
951 {
952 	struct rte_cryptodev *dev;
953 
954 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
955 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
956 		return;
957 	}
958 
959 	dev = &rte_crypto_devices[dev_id];
960 
961 	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
962 
963 	if (dev->data->dev_started == 0) {
964 		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
965 			dev_id);
966 		return;
967 	}
968 
969 	(*dev->dev_ops->dev_stop)(dev);
970 	dev->data->dev_started = 0;
971 }
972 
973 int
974 rte_cryptodev_close(uint8_t dev_id)
975 {
976 	struct rte_cryptodev *dev;
977 	int retval;
978 
979 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
980 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
981 		return -1;
982 	}
983 
984 	dev = &rte_crypto_devices[dev_id];
985 
986 	/* Device must be stopped before it can be closed */
987 	if (dev->data->dev_started == 1) {
988 		CDEV_LOG_ERR("Device %u must be stopped before closing",
989 				dev_id);
990 		return -EBUSY;
991 	}
992 
993 	/* We can't close the device if there are outstanding sessions in use */
994 	if (dev->data->session_pool != NULL) {
995 		if (!rte_mempool_full(dev->data->session_pool)) {
996 			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
997 					"has sessions still in use, free "
998 					"all sessions before calling close",
999 					(unsigned)dev_id);
1000 			return -EBUSY;
1001 		}
1002 	}
1003 
1004 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1005 	retval = (*dev->dev_ops->dev_close)(dev);
1006 
1007 	if (retval < 0)
1008 		return retval;
1009 
1010 	return 0;
1011 }
1012 
/*
 * Set up queue pair @queue_pair_id of device @dev_id.
 *
 * Validates the id, the queue-pair index, and the session mempools in
 * @qp_conf: both mempools must be given together, the session mempool's
 * private data must describe enough drivers and space for this device's
 * sessions. The device must be stopped. Delegates to the PMD's
 * queue_pair_setup op.
 *
 * Returns 0 on success or -EINVAL/-EBUSY/-ENOTSUP/PMD error code.
 */
int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)

{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL\n");
		return -EINVAL;
	}

	/* both session mempools must be provided together, or neither */
	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
		CDEV_LOG_ERR("Invalid mempools\n");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
		uint32_t obj_size = qp_conf->mp_session->elt_size;
		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
		struct rte_cryptodev_sym_session s = {0};

		/* the pool must carry the session-pool private descriptor */
		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}

		s.nb_drivers = pool_priv->nb_drivers;
		s.user_data_sz = pool_priv->user_data_sz;

		/* pool elements must be able to hold this device's session
		 * header and private session data
		 */
		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
			obj_size) || (s.nb_drivers <= dev->driver_id) ||
			rte_cryptodev_sym_get_private_session_size(dev_id) >
				obj_priv_size) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
1078 
1079 
1080 int
1081 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
1082 {
1083 	struct rte_cryptodev *dev;
1084 
1085 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1086 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1087 		return -ENODEV;
1088 	}
1089 
1090 	if (stats == NULL) {
1091 		CDEV_LOG_ERR("Invalid stats ptr");
1092 		return -EINVAL;
1093 	}
1094 
1095 	dev = &rte_crypto_devices[dev_id];
1096 	memset(stats, 0, sizeof(*stats));
1097 
1098 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1099 	(*dev->dev_ops->stats_get)(dev, stats);
1100 	return 0;
1101 }
1102 
1103 void
1104 rte_cryptodev_stats_reset(uint8_t dev_id)
1105 {
1106 	struct rte_cryptodev *dev;
1107 
1108 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1109 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1110 		return;
1111 	}
1112 
1113 	dev = &rte_crypto_devices[dev_id];
1114 
1115 	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1116 	(*dev->dev_ops->stats_reset)(dev);
1117 }
1118 
1119 
1120 void
1121 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1122 {
1123 	struct rte_cryptodev *dev;
1124 
1125 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1126 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1127 		return;
1128 	}
1129 
1130 	dev = &rte_crypto_devices[dev_id];
1131 
1132 	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
1133 
1134 	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1135 	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
1136 
1137 	dev_info->driver_name = dev->device->driver->name;
1138 	dev_info->device = dev->device;
1139 }
1140 
1141 
/**
 * Register a user callback for an event on a device.
 *
 * Duplicate registrations (same cb_fn, cb_arg and event) are coalesced:
 * if an identical entry already exists on the list, no new one is added
 * and 0 is returned.
 *
 * @return 0 on success, -EINVAL for a NULL cb_fn or unknown device,
 *   -ENOMEM if a new callback entry could not be allocated.
 */
int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	/* The list is shared with unregister and callback dispatch. */
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	/* Look for an identical registration already on the list. */
	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	/* user_cb is NULL here only when the allocation above failed. */
	return (user_cb == NULL) ? -ENOMEM : 0;
}
1184 
/**
 * Unregister user callback(s) for an event on a device.
 *
 * A cb_arg of (void *)-1 on the registered entry acts as a wildcard:
 * such entries match any cb_arg passed here.
 *
 * @return 0 on success, -EINVAL for a NULL cb_fn or unknown device,
 *   -EAGAIN if a matching callback is currently executing and could
 *   not be removed.
 */
int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	/* Walk with an explicit 'next' pointer: the current entry may be
	 * removed and freed inside the loop body.
	 */
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			/* Busy in rte_cryptodev_pmd_callback_process(). */
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}
1230 
/**
 * Invoke all user callbacks registered for the given event on a device.
 *
 * Called by PMDs. Each matching callback is copied to a local, marked
 * 'active', and invoked with the list lock dropped, so callbacks may
 * themselves call register/unregister without deadlocking. The 'active'
 * flag stops rte_cryptodev_callback_unregister() from freeing an entry
 * while it is running.
 */
void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		/* Copy so the entry can be read without holding the lock. */
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}
1252 
1253 
1254 int
1255 rte_cryptodev_sym_session_init(uint8_t dev_id,
1256 		struct rte_cryptodev_sym_session *sess,
1257 		struct rte_crypto_sym_xform *xforms,
1258 		struct rte_mempool *mp)
1259 {
1260 	struct rte_cryptodev *dev;
1261 	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
1262 			dev_id);
1263 	uint8_t index;
1264 	int ret;
1265 
1266 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1267 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1268 		return -EINVAL;
1269 	}
1270 
1271 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1272 
1273 	if (sess == NULL || xforms == NULL || dev == NULL)
1274 		return -EINVAL;
1275 
1276 	if (mp->elt_size < sess_priv_sz)
1277 		return -EINVAL;
1278 
1279 	index = dev->driver_id;
1280 	if (index >= sess->nb_drivers)
1281 		return -EINVAL;
1282 
1283 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);
1284 
1285 	if (sess->sess_data[index].refcnt == 0) {
1286 		ret = dev->dev_ops->sym_session_configure(dev, xforms,
1287 							sess, mp);
1288 		if (ret < 0) {
1289 			CDEV_LOG_ERR(
1290 				"dev_id %d failed to configure session details",
1291 				dev_id);
1292 			return ret;
1293 		}
1294 	}
1295 
1296 	sess->sess_data[index].refcnt++;
1297 	return 0;
1298 }
1299 
1300 int
1301 rte_cryptodev_asym_session_init(uint8_t dev_id,
1302 		struct rte_cryptodev_asym_session *sess,
1303 		struct rte_crypto_asym_xform *xforms,
1304 		struct rte_mempool *mp)
1305 {
1306 	struct rte_cryptodev *dev;
1307 	uint8_t index;
1308 	int ret;
1309 
1310 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1311 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1312 		return -EINVAL;
1313 	}
1314 
1315 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1316 
1317 	if (sess == NULL || xforms == NULL || dev == NULL)
1318 		return -EINVAL;
1319 
1320 	index = dev->driver_id;
1321 
1322 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
1323 				-ENOTSUP);
1324 
1325 	if (sess->sess_private_data[index] == NULL) {
1326 		ret = dev->dev_ops->asym_session_configure(dev,
1327 							xforms,
1328 							sess, mp);
1329 		if (ret < 0) {
1330 			CDEV_LOG_ERR(
1331 				"dev_id %d failed to configure session details",
1332 				dev_id);
1333 			return ret;
1334 		}
1335 	}
1336 
1337 	return 0;
1338 }
1339 
1340 struct rte_mempool *
1341 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1342 	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
1343 	int socket_id)
1344 {
1345 	struct rte_mempool *mp;
1346 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1347 	uint32_t obj_sz;
1348 
1349 	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
1350 	if (obj_sz > elt_size)
1351 		CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
1352 				obj_sz);
1353 	else
1354 		obj_sz = elt_size;
1355 
1356 	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
1357 			(uint32_t)(sizeof(*pool_priv)),
1358 			NULL, NULL, NULL, NULL,
1359 			socket_id, 0);
1360 	if (mp == NULL) {
1361 		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
1362 			__func__, name, rte_errno);
1363 		return NULL;
1364 	}
1365 
1366 	pool_priv = rte_mempool_get_priv(mp);
1367 	if (!pool_priv) {
1368 		CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
1369 			__func__, name);
1370 		rte_mempool_free(mp);
1371 		return NULL;
1372 	}
1373 
1374 	pool_priv->nb_drivers = nb_drivers;
1375 	pool_priv->user_data_sz = user_data_size;
1376 
1377 	return mp;
1378 }
1379 
1380 static unsigned int
1381 rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
1382 {
1383 	return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
1384 			sess->user_data_sz;
1385 }
1386 
1387 struct rte_cryptodev_sym_session *
1388 rte_cryptodev_sym_session_create(struct rte_mempool *mp)
1389 {
1390 	struct rte_cryptodev_sym_session *sess;
1391 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1392 
1393 	if (!mp) {
1394 		CDEV_LOG_ERR("Invalid mempool\n");
1395 		return NULL;
1396 	}
1397 
1398 	pool_priv = rte_mempool_get_priv(mp);
1399 
1400 	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv)) {
1401 		CDEV_LOG_ERR("Invalid mempool\n");
1402 		return NULL;
1403 	}
1404 
1405 	/* Allocate a session structure from the session pool */
1406 	if (rte_mempool_get(mp, (void **)&sess)) {
1407 		CDEV_LOG_ERR("couldn't get object from session mempool");
1408 		return NULL;
1409 	}
1410 
1411 	sess->nb_drivers = pool_priv->nb_drivers;
1412 	sess->user_data_sz = pool_priv->user_data_sz;
1413 	sess->opaque_data = 0;
1414 
1415 	/* Clear device session pointer.
1416 	 * Include the flag indicating presence of user data
1417 	 */
1418 	memset(sess->sess_data, 0,
1419 			rte_cryptodev_sym_session_data_size(sess));
1420 
1421 	return sess;
1422 }
1423 
1424 struct rte_cryptodev_asym_session *
1425 rte_cryptodev_asym_session_create(struct rte_mempool *mp)
1426 {
1427 	struct rte_cryptodev_asym_session *sess;
1428 
1429 	/* Allocate a session structure from the session pool */
1430 	if (rte_mempool_get(mp, (void **)&sess)) {
1431 		CDEV_LOG_ERR("couldn't get object from session mempool");
1432 		return NULL;
1433 	}
1434 
1435 	/* Clear device session pointer.
1436 	 * Include the flag indicating presence of private data
1437 	 */
1438 	memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1439 
1440 	return sess;
1441 }
1442 
1443 int
1444 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1445 		struct rte_cryptodev_sym_session *sess)
1446 {
1447 	struct rte_cryptodev *dev;
1448 	uint8_t driver_id;
1449 
1450 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1451 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1452 		return -EINVAL;
1453 	}
1454 
1455 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1456 
1457 	if (dev == NULL || sess == NULL)
1458 		return -EINVAL;
1459 
1460 	driver_id = dev->driver_id;
1461 	if (sess->sess_data[driver_id].refcnt == 0)
1462 		return 0;
1463 	if (--sess->sess_data[driver_id].refcnt != 0)
1464 		return -EBUSY;
1465 
1466 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);
1467 
1468 	dev->dev_ops->sym_session_clear(dev, sess);
1469 
1470 	return 0;
1471 }
1472 
1473 int
1474 rte_cryptodev_asym_session_clear(uint8_t dev_id,
1475 		struct rte_cryptodev_asym_session *sess)
1476 {
1477 	struct rte_cryptodev *dev;
1478 
1479 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1480 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1481 		return -EINVAL;
1482 	}
1483 
1484 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1485 
1486 	if (dev == NULL || sess == NULL)
1487 		return -EINVAL;
1488 
1489 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);
1490 
1491 	dev->dev_ops->asym_session_clear(dev, sess);
1492 
1493 	return 0;
1494 }
1495 
1496 int
1497 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
1498 {
1499 	uint8_t i;
1500 	struct rte_mempool *sess_mp;
1501 
1502 	if (sess == NULL)
1503 		return -EINVAL;
1504 
1505 	/* Check that all device private data has been freed */
1506 	for (i = 0; i < sess->nb_drivers; i++) {
1507 		if (sess->sess_data[i].refcnt != 0)
1508 			return -EBUSY;
1509 	}
1510 
1511 	/* Return session to mempool */
1512 	sess_mp = rte_mempool_from_obj(sess);
1513 	rte_mempool_put(sess_mp, sess);
1514 
1515 	return 0;
1516 }
1517 
1518 int
1519 rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
1520 {
1521 	uint8_t i;
1522 	void *sess_priv;
1523 	struct rte_mempool *sess_mp;
1524 
1525 	if (sess == NULL)
1526 		return -EINVAL;
1527 
1528 	/* Check that all device private data has been freed */
1529 	for (i = 0; i < nb_drivers; i++) {
1530 		sess_priv = get_asym_session_private_data(sess, i);
1531 		if (sess_priv != NULL)
1532 			return -EBUSY;
1533 	}
1534 
1535 	/* Return session to mempool */
1536 	sess_mp = rte_mempool_from_obj(sess);
1537 	rte_mempool_put(sess_mp, sess);
1538 
1539 	return 0;
1540 }
1541 
1542 unsigned int
1543 rte_cryptodev_sym_get_header_session_size(void)
1544 {
1545 	/*
1546 	 * Header contains pointers to the private data of all registered
1547 	 * drivers and all necessary information to ensure safely clear
1548 	 * or free al session.
1549 	 */
1550 	struct rte_cryptodev_sym_session s = {0};
1551 
1552 	s.nb_drivers = nb_drivers;
1553 
1554 	return (unsigned int)(sizeof(s) +
1555 			rte_cryptodev_sym_session_data_size(&s));
1556 }
1557 
1558 unsigned int
1559 rte_cryptodev_sym_get_existing_header_session_size(
1560 		struct rte_cryptodev_sym_session *sess)
1561 {
1562 	if (!sess)
1563 		return 0;
1564 	else
1565 		return (unsigned int)(sizeof(*sess) +
1566 				rte_cryptodev_sym_session_data_size(sess));
1567 }
1568 
1569 unsigned int
1570 rte_cryptodev_asym_get_header_session_size(void)
1571 {
1572 	/*
1573 	 * Header contains pointers to the private data
1574 	 * of all registered drivers, and a flag which
1575 	 * indicates presence of private data
1576 	 */
1577 	return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1578 }
1579 
1580 unsigned int
1581 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
1582 {
1583 	struct rte_cryptodev *dev;
1584 	unsigned int priv_sess_size;
1585 
1586 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1587 		return 0;
1588 
1589 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1590 
1591 	if (*dev->dev_ops->sym_session_get_size == NULL)
1592 		return 0;
1593 
1594 	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
1595 
1596 	return priv_sess_size;
1597 }
1598 
1599 unsigned int
1600 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
1601 {
1602 	struct rte_cryptodev *dev;
1603 	unsigned int header_size = sizeof(void *) * nb_drivers;
1604 	unsigned int priv_sess_size;
1605 
1606 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1607 		return 0;
1608 
1609 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1610 
1611 	if (*dev->dev_ops->asym_session_get_size == NULL)
1612 		return 0;
1613 
1614 	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
1615 	if (priv_sess_size < header_size)
1616 		return header_size;
1617 
1618 	return priv_sess_size;
1619 
1620 }
1621 
1622 int
1623 rte_cryptodev_sym_session_set_user_data(
1624 					struct rte_cryptodev_sym_session *sess,
1625 					void *data,
1626 					uint16_t size)
1627 {
1628 	if (sess == NULL)
1629 		return -EINVAL;
1630 
1631 	if (sess->user_data_sz < size)
1632 		return -ENOMEM;
1633 
1634 	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
1635 	return 0;
1636 }
1637 
1638 void *
1639 rte_cryptodev_sym_session_get_user_data(
1640 					struct rte_cryptodev_sym_session *sess)
1641 {
1642 	if (sess == NULL || sess->user_data_sz == 0)
1643 		return NULL;
1644 
1645 	return (void *)(sess->sess_data + sess->nb_drivers);
1646 }
1647 
/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned i)
{
	struct rte_crypto_op *op = _op_data;
	/* opaque_arg carries the op type passed to rte_crypto_op_pool_create() */
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	/* Set the op type and reset status/session fields to defaults. */
	__rte_crypto_op_reset(op, type);

	/* Record the op's IO address and owning pool once, at pool setup. */
	op->phys_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}
1665 
1666 
1667 struct rte_mempool *
1668 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
1669 		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
1670 		int socket_id)
1671 {
1672 	struct rte_crypto_op_pool_private *priv;
1673 
1674 	unsigned elt_size = sizeof(struct rte_crypto_op) +
1675 			priv_size;
1676 
1677 	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
1678 		elt_size += sizeof(struct rte_crypto_sym_op);
1679 	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
1680 		elt_size += sizeof(struct rte_crypto_asym_op);
1681 	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1682 		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
1683 		                    sizeof(struct rte_crypto_asym_op));
1684 	} else {
1685 		CDEV_LOG_ERR("Invalid op_type\n");
1686 		return NULL;
1687 	}
1688 
1689 	/* lookup mempool in case already allocated */
1690 	struct rte_mempool *mp = rte_mempool_lookup(name);
1691 
1692 	if (mp != NULL) {
1693 		priv = (struct rte_crypto_op_pool_private *)
1694 				rte_mempool_get_priv(mp);
1695 
1696 		if (mp->elt_size != elt_size ||
1697 				mp->cache_size < cache_size ||
1698 				mp->size < nb_elts ||
1699 				priv->priv_size <  priv_size) {
1700 			mp = NULL;
1701 			CDEV_LOG_ERR("Mempool %s already exists but with "
1702 					"incompatible parameters", name);
1703 			return NULL;
1704 		}
1705 		return mp;
1706 	}
1707 
1708 	mp = rte_mempool_create(
1709 			name,
1710 			nb_elts,
1711 			elt_size,
1712 			cache_size,
1713 			sizeof(struct rte_crypto_op_pool_private),
1714 			NULL,
1715 			NULL,
1716 			rte_crypto_op_init,
1717 			&type,
1718 			socket_id,
1719 			0);
1720 
1721 	if (mp == NULL) {
1722 		CDEV_LOG_ERR("Failed to create mempool %s", name);
1723 		return NULL;
1724 	}
1725 
1726 	priv = (struct rte_crypto_op_pool_private *)
1727 			rte_mempool_get_priv(mp);
1728 
1729 	priv->priv_size = priv_size;
1730 	priv->type = type;
1731 
1732 	return mp;
1733 }
1734 
1735 int
1736 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
1737 {
1738 	struct rte_cryptodev *dev = NULL;
1739 	uint32_t i = 0;
1740 
1741 	if (name == NULL)
1742 		return -EINVAL;
1743 
1744 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
1745 		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
1746 				"%s_%u", dev_name_prefix, i);
1747 
1748 		if (ret < 0)
1749 			return ret;
1750 
1751 		dev = rte_cryptodev_pmd_get_named_dev(name);
1752 		if (!dev)
1753 			return 0;
1754 	}
1755 
1756 	return -1;
1757 }
1758 
/* List of registered crypto drivers; entries are appended by
 * rte_cryptodev_allocate_driver() and searched by the driver id/name
 * lookup helpers below.
 */
TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
1763 
1764 int
1765 rte_cryptodev_driver_id_get(const char *name)
1766 {
1767 	struct cryptodev_driver *driver;
1768 	const char *driver_name;
1769 
1770 	if (name == NULL) {
1771 		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL");
1772 		return -1;
1773 	}
1774 
1775 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
1776 		driver_name = driver->driver->name;
1777 		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
1778 			return driver->id;
1779 	}
1780 	return -1;
1781 }
1782 
1783 const char *
1784 rte_cryptodev_name_get(uint8_t dev_id)
1785 {
1786 	struct rte_cryptodev *dev;
1787 
1788 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
1789 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1790 		return NULL;
1791 	}
1792 
1793 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1794 	if (dev == NULL)
1795 		return NULL;
1796 
1797 	return dev->data->name;
1798 }
1799 
1800 const char *
1801 rte_cryptodev_driver_name_get(uint8_t driver_id)
1802 {
1803 	struct cryptodev_driver *driver;
1804 
1805 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
1806 		if (driver->id == driver_id)
1807 			return driver->driver->name;
1808 	return NULL;
1809 }
1810 
1811 uint8_t
1812 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
1813 		const struct rte_driver *drv)
1814 {
1815 	crypto_drv->driver = drv;
1816 	crypto_drv->id = nb_drivers;
1817 
1818 	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
1819 
1820 	return nb_drivers++;
1821 }
1822