/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "rte_cryptodev_pmd.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0,
		.max_devs		= RTE_CRYPTO_MAX_DEVS
};

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the callback's parameter, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * String identifiers for the crypto cipher algorithms.
 * They can be used on application command lines.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};

/**
 * String identifiers for the crypto cipher operations.
 * They can be used on application command lines.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
	[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * String identifiers for the crypto auth algorithms.
 * They can be used on application command lines.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};

/**
 * String identifiers for the crypto AEAD algorithms.
 * They can be used on application command lines.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
};

/**
 * String identifiers for the crypto AEAD operations.
 * They can be used on application command lines.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * String identifiers for the asymmetric crypto transform operations.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
};

/**
 * String identifiers for the asymmetric crypto operations.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]	= "priv_key_generate",
	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
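
/*
 * Usage sketch (illustrative only, not part of the library): mapping a
 * command-line string onto its algorithm enum with the lookup helpers in
 * this file. The "aes-cbc" literal is an arbitrary example value; the
 * auth and AEAD variants below work the same way.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") < 0)
 *		printf("unrecognised cipher algorithm\n");
 *	else
 *		printf("parsed cipher algorithm %u\n", algo);
 */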

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int __rte_experimental
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
			rte_crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

/**
 * String identifiers for the crypto auth operations.
 * They can be used on application command lines.
 */
const char *
rte_crypto_auth_operation_strings[] = {
	[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}
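
/*
 * Worked example (illustrative): a range of {.min = 16, .max = 64,
 * .increment = 16} accepts the sizes 16, 32, 48 and 64, while an
 * increment of 0 means the range holds the single value min == max.
 * So param_range_check(32, &range) returns 0 for that range, and
 * param_range_check(20, &range) returns -1.
 */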

const struct rte_cryptodev_asymmetric_xform_capability * __rte_experimental
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type)
			return &capability->asym.xform_capa;
	}
	return NULL;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->auth.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->aead.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
		return -1;

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		return -1;

	return 0;
}
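
/*
 * Usage sketch (illustrative only): querying a device for AES-GCM
 * support and validating the intended parameters against the reported
 * capability. dev_id is a placeholder and the sizes are example values
 * (16-byte key, 16-byte digest, no AAD limit checked, 12-byte IV).
 *
 *	struct rte_cryptodev_sym_capability_idx cap_idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
 *	if (cap == NULL || rte_cryptodev_sym_capability_check_aead(cap,
 *			16, 16, 0, 12) < 0)
 *		printf("AES-GCM with these parameters is unsupported\n");
 */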

int __rte_experimental
rte_cryptodev_asym_xform_capability_check_optype(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type)
{
	if (capability->op_types & (1 << op_type))
		return 1;

	return 0;
}

int __rte_experimental
rte_cryptodev_asym_xform_capability_check_modlen(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	uint16_t modlen)
{
	/* No need to check limits if min or max is 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min)
			return -1;
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max)
			return -1;
	}

	/* In any case, check that modlen is a multiple of the increment */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			return -1;
	}

	return 0;
}
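
/*
 * Usage sketch (illustrative only): checking whether a device can sign
 * with RSA using a 2048-bit (256-byte) modulus. dev_id is a placeholder.
 *
 *	struct rte_cryptodev_asym_capability_idx asym_idx = {
 *		.type = RTE_CRYPTO_ASYM_XFORM_RSA,
 *	};
 *	const struct rte_cryptodev_asymmetric_xform_capability *cap;
 *
 *	cap = rte_cryptodev_asym_capability_get(dev_id, &asym_idx);
 *	if (cap == NULL ||
 *			!rte_cryptodev_asym_xform_capability_check_optype(
 *				cap, RTE_CRYPTO_ASYM_OP_SIGN) ||
 *			rte_cryptodev_asym_xform_capability_check_modlen(
 *				cap, 256) < 0)
 *		printf("RSA sign with a 256-byte modulus is unsupported\n");
 */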

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	default:
		return NULL;
	}
}
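
/*
 * Usage sketch (illustrative only): printing a device's feature names by
 * walking the bits of dev_info.feature_flags. dev_id is a placeholder.
 *
 *	struct rte_cryptodev_info info;
 *	uint64_t flag;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	for (flag = 1; flag != 0; flag <<= 1) {
 *		const char *name = rte_cryptodev_get_feature_name(flag);
 *
 *		if ((info.feature_flags & flag) && name != NULL)
 *			printf("%s\n", name);
 *	}
 */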

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < cryptodev_globals.max_devs; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

unsigned int
rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (dev_id >= cryptodev_globals.nb_devs)
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < cryptodev_globals.nb_devs; i++)
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < cryptodev_globals.max_devs; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;
	uint8_t max_devs = cryptodev_globals.max_devs;

	for (i = 0; i < max_devs && count < nb_devices; i++) {

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}
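
/*
 * Usage sketch (illustrative only): collecting the device ids bound to
 * one driver. The driver name is just an example; any name returned by
 * rte_cryptodev_driver_name_get() can be passed.
 *
 *	uint8_t devs[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t n, i;
 *
 *	n = rte_cryptodev_devices_get("crypto_aesni_mb", devs,
 *			RTE_CRYPTO_MAX_DEVS);
 *	for (i = 0; i < n; i++)
 *		printf("dev %u uses that driver\n", devs[i]);
 */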

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY)
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}

static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
	} else {
		mz = rte_memzone_lookup(mz_name);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data *cryptodev_data =
				cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, &cryptodev_data,
				socket_id);

		if (retval < 0 || cryptodev_data == NULL)
			return NULL;

		cryptodev->data = cryptodev_data;

		snprintf(cryptodev->data->name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s", name);

		cryptodev->data->dev_id = dev_id;
		cryptodev->data->socket_id = socket_id;
		cryptodev->data->dev_started = 0;

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;

	if (cryptodev == NULL)
		return -EINVAL;

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(cryptodev->data->dev_id);
		if (ret < 0)
			return ret;
	}

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			CDEV_LOG_ERR("failed to realloc qp meta data,"
						" nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}

		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	return (*dev->dev_ops->dev_configure)(dev, config);
}

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
		struct rte_mempool *session_pool)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id, session_pool);
}
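
/*
 * Usage sketch (illustrative only): typical device bring-up. dev_id,
 * socket_id and sess_pool are placeholders supplied by the application;
 * the queue-pair and descriptor counts are example values.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = socket_id,
 *		.nb_queue_pairs = 2,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *	uint16_t qp_id;
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0)
 *		return -1;
 *	for (qp_id = 0; qp_id < conf.nb_queue_pairs; qp_id++)
 *		if (rte_cryptodev_queue_pair_setup(dev_id, qp_id, &qp_conf,
 *				socket_id, sess_pool) < 0)
 *			return -1;
 *	if (rte_cryptodev_start(dev_id) < 0)
 *		return -1;
 */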

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (dev_id >= cryptodev_globals.nb_devs) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}

int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
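
/*
 * Usage sketch (illustrative only): registering an error-event handler.
 * on_crypto_error is a hypothetical application function, dev_id a
 * placeholder.
 *
 *	static void
 *	on_crypto_error(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg __rte_unused)
 *	{
 *		printf("crypto device %u reported event %d\n", dev_id, event);
 *	}
 *
 *	...
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			on_crypto_error, NULL);
 */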

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}

int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint8_t index;
	int ret;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	index = dev->driver_id;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);

	if (sess->sess_private_data[index] == NULL) {
		ret = dev->dev_ops->sym_session_configure(dev, xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	return 0;
}

int __rte_experimental
rte_cryptodev_asym_session_init(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess,
		struct rte_crypto_asym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint8_t index;
	int ret;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	index = dev->driver_id;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
				-ENOTSUP);

	if (sess->sess_private_data[index] == NULL) {
		ret = dev->dev_ops->asym_session_configure(dev,
							xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	return 0;
}

struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	/* Clear the device session pointers and the flag indicating
	 * presence of user data.
	 */
	memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));

	return sess;
}
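
/*
 * Usage sketch (illustrative only): allocating a session from a mempool
 * and binding it to one device with an example AES-CBC cipher transform.
 * sess_pool, dev_id, the key buffer and IV_OFFSET are placeholders the
 * application defines.
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *	struct rte_cryptodev_sym_session *sess;
 *
 *	sess = rte_cryptodev_sym_session_create(sess_pool);
 *	if (sess == NULL ||
 *			rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *				sess_pool) < 0)
 *		return -1;
 */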

struct rte_cryptodev_asym_session * __rte_experimental
rte_cryptodev_asym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_asym_session *sess;

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	/* Clear the device session pointers and the flag indicating
	 * presence of private data.
	 */
	memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));

	return sess;
}

int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);

	dev->dev_ops->sym_session_clear(dev, sess);

	return 0;
}

int __rte_experimental
rte_cryptodev_asym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess)
{
	struct rte_cryptodev *dev;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);

	dev->dev_ops->asym_session_clear(dev, sess);

	return 0;
}

int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
{
	uint8_t i;
	void *sess_priv;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < nb_drivers; i++) {
		sess_priv = get_sym_session_private_data(sess, i);
		if (sess_priv != NULL)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	return 0;
}

int __rte_experimental
rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
{
	uint8_t i;
	void *sess_priv;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < nb_drivers; i++) {
		sess_priv = get_asym_session_private_data(sess, i);
		if (sess_priv != NULL)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	return 0;
}

unsigned int
rte_cryptodev_sym_get_header_session_size(void)
{
	/*
	 * The header contains pointers to the private data
	 * of all registered drivers, plus a flag which
	 * indicates the presence of user data.
	 */
	return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
}

unsigned int __rte_experimental
rte_cryptodev_asym_get_header_session_size(void)
{
	/*
	 * The header contains pointers to the private data
	 * of all registered drivers, plus a flag which
	 * indicates the presence of private data.
	 */
	return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
}

unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int header_size = sizeof(void *) * nb_drivers;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);

	/*
	 * If size is less than session header size,
	 * return the latter, as this guarantees that
	 * sessionless operations will work
	 */
	if (priv_sess_size < header_size)
		return header_size;

	return priv_sess_size;
}
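
/*
 * Sizing sketch (illustrative only): one session mempool serves both
 * rte_cryptodev_sym_session_create() (the header) and
 * rte_cryptodev_sym_session_init() (a device's private data), so
 * applications typically size the element to the largest private
 * session size across their devices, which this function already caps
 * from below at the header size. NB_SESSIONS and socket_id are
 * placeholders.
 *
 *	unsigned int sz = rte_cryptodev_sym_get_private_session_size(dev_id);
 *	struct rte_mempool *sess_pool;
 *
 *	sess_pool = rte_mempool_create("sess_pool", NB_SESSIONS * 2, sz,
 *			0, 0, NULL, NULL, NULL, NULL, socket_id, 0);
 */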

unsigned int __rte_experimental
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int header_size = sizeof(void *) * nb_drivers;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->asym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
	if (priv_sess_size < header_size)
		return header_size;

	return priv_sess_size;
}

int __rte_experimental
rte_cryptodev_sym_session_set_user_data(
					struct rte_cryptodev_sym_session *sess,
					void *data,
					uint16_t size)
{
	uint16_t off_set;
	uint8_t *user_data_present;

	if (sess == NULL)
		return -EINVAL;

	off_set = sizeof(void *) * nb_drivers;
	user_data_present = (uint8_t *)sess + off_set;

	*user_data_present = 1;
	off_set += sizeof(uint8_t);
	rte_memcpy((uint8_t *)sess + off_set, data, size);
	return 0;
}

void * __rte_experimental
rte_cryptodev_sym_session_get_user_data(
					struct rte_cryptodev_sym_session *sess)
{
	uint16_t off_set;
	uint8_t *user_data_present;

	if (sess == NULL)
		return NULL;

	off_set = sizeof(void *) * nb_drivers;
	user_data_present = (uint8_t *)sess + off_set;
	if (!*user_data_present)
		return NULL;

	off_set += sizeof(uint8_t);
	return (uint8_t *)sess + off_set;
}
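
/*
 * Usage sketch (illustrative only): stashing per-session application
 * state after the driver private-data area. The session mempool element
 * must have been created with room for this extra data; struct app_ctx
 * is a hypothetical application type and sess a placeholder.
 *
 *	struct app_ctx ctx = { 0 };
 *	struct app_ctx *p;
 *
 *	rte_cryptodev_sym_session_set_user_data(sess, &ctx, sizeof(ctx));
 *	p = rte_cryptodev_sym_session_get_user_data(sess);
 */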

/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned int i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}

struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned int nb_elts, unsigned int cache_size,
		uint16_t priv_size, int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned int elt_size = sizeof(struct rte_crypto_op) +
			priv_size;

	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_sym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_asym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				sizeof(struct rte_crypto_asym_op));
	} else {
		CDEV_LOG_ERR("Invalid op_type");
		return NULL;
	}

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			mp = NULL;
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;
	priv->type = type;

	return mp;
}
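
/*
 * Usage sketch (illustrative only): creating an operation pool and
 * drawing one symmetric op from it. The pool name, element counts and
 * the 16 bytes of per-op private data are example values; socket_id is
 * a placeholder.
 *
 *	struct rte_mempool *op_pool;
 *	struct rte_crypto_op *op;
 *
 *	op_pool = rte_crypto_op_pool_create("crypto_op_pool",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 16,
 *			socket_id);
 *	if (op_pool == NULL)
 *		return -1;
 *	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 */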

int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	uint32_t i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}

TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);

int
rte_cryptodev_driver_id_get(const char *name)
{
	struct cryptodev_driver *driver;
	const char *driver_name;

	if (name == NULL) {
		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
		return -1;
	}

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		driver_name = driver->driver->name;
		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
			return driver->id;
	}
	return -1;
}

const char *
rte_cryptodev_name_get(uint8_t dev_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL)
		return NULL;

	return dev->data->name;
}

const char *
rte_cryptodev_driver_name_get(uint8_t driver_id)
{
	struct cryptodev_driver *driver;

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
		if (driver->id == driver_id)
			return driver->driver->name;
	return NULL;
}

uint8_t
rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
		const struct rte_driver *drv)
{
	crypto_drv->driver = drv;
	crypto_drv->id = nb_drivers;

	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);

	return nb_drivers++;
}