/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_telemetry.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "cryptodev_pmd.h"
#include "rte_cryptodev_trace.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0
};

/* Public fastpath APIs. */
struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the callback's parameters, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * String identifiers for the crypto cipher algorithms.
 * They can be used in an application command line.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};

/**
 * String identifiers for the crypto cipher operations.
 * They can be used in an application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * String identifiers for the crypto auth algorithms.
 * They can be used in an application command line.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};

/**
 * String identifiers for the crypto AEAD algorithms.
 * They can be used in an application command line.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * String identifiers for the crypto AEAD operations.
 * They can be used in an application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * String identifiers for the asymmetric crypto transform operations.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};

/**
 * String identifiers for the asymmetric crypto operations.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]	= "priv_key_generate",
	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
};

/**
 * The private data structure stored in the sym session mempool's
 * private data area.
 */
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t nb_drivers;
	/**< number of elements in sess_data array */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};

/**
 * The private data structure stored in the asym session mempool's
 * private data area.
 */
struct rte_cryptodev_asym_session_pool_private_data {
	uint16_t max_priv_session_sz;
	/**< Size of private session data used when creating mempool */
	uint16_t user_data_sz;
	/**< Session user data will be placed after sess_private_data */
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
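
/*
 * Illustrative usage sketch (not part of this file): resolving a cipher
 * algorithm enum from its command-line string. "aes-cbc" is an assumed
 * example input; the lookup returns 0 on success and -1 otherwise.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") < 0)
 *		printf("unknown cipher algorithm\n");
 *	else
 *		printf("aes-cbc -> enum value %d\n", algo);
 */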

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
			rte_crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

/**
 * String identifiers for the crypto auth operations.
 * They can be used in an application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If the range is actually only one value, the size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if the value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type)
			return &capability->asym.xform_capa;
	}
	return NULL;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		return -1;

	return 0;
}
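
/*
 * Illustrative usage sketch (not part of this file): querying a device's
 * AES-CBC capability and validating a 16-byte key and IV against it.
 * dev_id is an assumed, already-probed device identifier.
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) < 0)
 *		printf("AES-CBC with 16-byte key/IV not supported\n");
 */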

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->auth.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->aead.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
		return -1;

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_optype(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type)
{
	if (capability->op_types & (1 << op_type))
		return 1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	uint16_t modlen)
{
	/* no need to check the limit if min or max is 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min)
			return -1;
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max)
			return -1;
	}

	/* in any case, check that the given modlen is a multiple of the
	 * increment
	 */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			return -1;
	}

	return 0;
}
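
/*
 * Illustrative usage sketch (not part of this file): checking that a device
 * can perform RSA sign operations with a 256-byte (2048-bit) modulus.
 * dev_id is an assumed device identifier.
 *
 *	struct rte_cryptodev_asym_capability_idx idx = {
 *		.type = RTE_CRYPTO_ASYM_XFORM_RSA,
 *	};
 *	const struct rte_cryptodev_asymmetric_xform_capability *cap;
 *
 *	cap = rte_cryptodev_asym_capability_get(dev_id, &idx);
 *	if (cap != NULL &&
 *	    rte_cryptodev_asym_xform_capability_check_optype(cap,
 *			RTE_CRYPTO_ASYM_OP_SIGN) &&
 *	    rte_cryptodev_asym_xform_capability_check_modlen(cap, 256) == 0)
 *		printf("RSA-2048 sign supported\n");
 */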

/* spinlock for crypto device enq/deq callbacks */
static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;

static void
cryptodev_cb_cleanup(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *next;
	uint16_t qp_id;

	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
		return;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	rte_free(dev->enq_cbs);
	dev->enq_cbs = NULL;
	rte_free(dev->deq_cbs);
	dev->deq_cbs = NULL;
}

static int
cryptodev_cb_init(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_rcu_qsbr *qsbr;
	uint16_t qp_id;
	size_t size;

	/* Max threads set to 1, as only one DP thread accesses a queue pair */
	const uint32_t max_threads = 1;

	dev->enq_cbs = rte_zmalloc(NULL,
				   sizeof(struct rte_cryptodev_cb_rcu) *
				   dev->data->nb_queue_pairs, 0);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
		return -ENOMEM;
	}

	dev->deq_cbs = rte_zmalloc(NULL,
				   sizeof(struct rte_cryptodev_cb_rcu) *
				   dev->data->nb_queue_pairs, 0);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
		rte_free(dev->enq_cbs);
		return -ENOMEM;
	}

	/* Create RCU QSBR variable */
	size = rte_rcu_qsbr_get_memsize(max_threads);

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	return 0;

cb_init_err:
	cryptodev_cb_cleanup(dev);
	return -ENOMEM;
}

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
		return "SYM_CPU_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
		return "ASYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
		return "SYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
		return "NON_BYTE_ALIGNED_DATA";
	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
		return "CIPHER_MULTIPLE_DATA_UNITS";
	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
		return "CIPHER_WRAPPED_KEY";
	default:
		return NULL;
	}
}
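
/*
 * Illustrative usage sketch (not part of this file): printing the names of
 * all feature flags advertised by a device. dev_info is assumed to have
 * been filled in by rte_cryptodev_info_get().
 *
 *	uint64_t flag;
 *
 *	for (flag = 1; flag != 0; flag <<= 1) {
 *		const char *name = rte_cryptodev_get_feature_name(flag);
 *
 *		if ((dev_info.feature_flags & flag) && name != NULL)
 *			printf("%s\n", name);
 *	}
 */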

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
			rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}

unsigned int
rte_cryptodev_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (!rte_cryptodev_is_valid_device_data(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;
	}

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}
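
/*
 * Illustrative usage sketch (not part of this file): collecting the ids of
 * all devices bound to a given driver. "crypto_aesni_mb" is an assumed
 * example driver name.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t n, i;
 *
 *	n = rte_cryptodev_devices_get("crypto_aesni_mb", ids,
 *			RTE_CRYPTO_MAX_DEVS);
 *	for (i = 0; i < n; i++)
 *		printf("dev %u on socket %d\n", ids[i],
 *				rte_cryptodev_socket_id(ids[i]));
 */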

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
			(rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY))
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}

static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
				mz_name, mz);
	} else {
		mz = rte_memzone_lookup(mz_name);
		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
				mz_name, mz);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
				mz_name, mz);
		return rte_memzone_free(mz);
	} else {
		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
				mz_name, mz);
	}

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
			CDEV_LOG_DEBUG("PRIMARY:init data");
		}

		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
				cryptodev->data->name,
				cryptodev->data->dev_id,
				cryptodev->data->socket_id,
				cryptodev->data->dev_started);

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return 0;
	}

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) *
				dev_info.max_nb_queue_pairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -ENOMEM;
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
			qp[i] = NULL;
		}
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	cryptodev_cb_cleanup(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	diag = cryptodev_cb_init(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	if (diag) {
		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	/* expose selection of PMD fast-path functions */
	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);

	rte_cryptodev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	/* point fast-path functions to dummy ones */
	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);
	rte_cryptodev_trace_close(dev_id, retval);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}
	void **qps = dev->data->queue_pairs;

	if (qps[queue_pair_id]) {
		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
			queue_pair_id, dev_id);
		return 1;
	}

	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
		queue_pair_id, dev_id);

	return 0;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL");
		return -EINVAL;
	}

	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
		CDEV_LOG_ERR("Invalid mempools");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
		uint32_t obj_size = qp_conf->mp_session->elt_size;
		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
		struct rte_cryptodev_sym_session s = {0};

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}

		s.nb_drivers = pool_priv->nb_drivers;
		s.user_data_sz = pool_priv->user_data_sz;

		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
			obj_size) || (s.nb_drivers <= dev->driver_id) ||
			rte_cryptodev_sym_get_private_session_size(dev_id) >
				obj_priv_size) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
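
/*
 * Illustrative usage sketch (not part of this file): the minimal
 * configure -> queue-pair setup -> start sequence for one device. The
 * mempool pointers (sess_mp, sess_priv_mp) and the descriptor count are
 * assumed placeholders.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_cryptodev_socket_id(dev_id),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *	    rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			conf.socket_id) < 0 ||
 *	    rte_cryptodev_start(dev_id) < 0)
 *		rte_exit(EXIT_FAILURE, "cryptodev %u setup failed\n", dev_id);
 */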

struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in FIFO order. */
	list = &dev->enq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->arg should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->arg should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	return cb;
}
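
/*
 * Illustrative usage sketch (not part of this file): installing and later
 * removing a user callback on queue pair 0. my_enq_cb is an assumed
 * application function of type rte_cryptodev_callback_fn.
 *
 *	struct rte_cryptodev_cb *cb;
 *
 *	cb = rte_cryptodev_add_enq_callback(dev_id, 0, my_enq_cb, NULL);
 *	if (cb == NULL)
 *		printf("add failed, rte_errno=%d\n", rte_errno);
 *	...
 *	rte_cryptodev_remove_enq_callback(dev_id, 0, cb);
 */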

int
rte_cryptodev_remove_enq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->enq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in FIFO order. */
	list = &dev->deq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->arg should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->arg should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	return cb;
}

int
rte_cryptodev_remove_deq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->deq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}

int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}

int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
			dev_id);
	uint8_t index;
	int ret;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL || mp == NULL)
		return -EINVAL;

	if (mp->elt_size < sess_priv_sz)
		return -EINVAL;

	index = dev->driver_id;
	if (index >= sess->nb_drivers)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);

	if (sess->sess_data[index].refcnt == 0) {
		ret = dev->dev_ops->sym_session_configure(dev, xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
	sess->sess_data[index].refcnt++;
	return 0;
}

struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
	if (obj_sz > elt_size)
		CDEV_LOG_INFO("elt_size %u is expanded to %u", elt_size,
				obj_sz);
	else
		obj_sz = elt_size;

	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	pool_priv->nb_drivers = nb_drivers;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
		elt_size, cache_size, user_data_size, mp);
	return mp;
}
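
/*
 * Illustrative usage sketch (not part of this file): creating a session
 * header pool sized for 1024 sessions with no per-session user data. The
 * pool name and cache size are assumed placeholders.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_cryptodev_sym_session_pool_create("sess_mp", 1024,
 *			rte_cryptodev_sym_get_header_session_size(),
 *			32, 0, rte_socket_id());
 *	if (mp == NULL)
 *		printf("session pool creation failed\n");
 */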

struct rte_mempool *
rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t cache_size, uint16_t user_data_size, int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
	uint32_t obj_sz, obj_sz_aligned;
	uint8_t dev_id, priv_sz, max_priv_sz = 0;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
		if (rte_cryptodev_is_valid_dev(dev_id)) {
			priv_sz = rte_cryptodev_asym_get_private_session_size(dev_id);
			if (priv_sz > max_priv_sz)
				max_priv_sz = priv_sz;
		}
	if (max_priv_sz == 0) {
		CDEV_LOG_INFO("Could not set max private session size");
		return NULL;
	}

	obj_sz = rte_cryptodev_asym_get_header_session_size() + max_priv_sz +
			user_data_size;
	obj_sz_aligned = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);

	mp = rte_mempool_create(name, nb_elts, obj_sz_aligned, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}
	pool_priv->max_priv_session_sz = max_priv_sz;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_asym_session_pool_create(name, nb_elts,
		user_data_size, cache_size, mp);
	return mp;
}

static unsigned int
rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
{
	return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
			sess->user_data_sz;
}

static uint8_t
rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp)
		return 0;

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
			pool_priv->nb_drivers != nb_drivers ||
			mp->elt_size <
				rte_cryptodev_sym_get_header_session_size()
				+ pool_priv->user_data_sz)
		return 0;

	return 1;
}

struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!rte_cryptodev_sym_is_valid_session_pool(mp)) {
		CDEV_LOG_ERR("Invalid mempool");
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	sess->nb_drivers = pool_priv->nb_drivers;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->opaque_data = 0;

	/* Clear device session pointers,
	 * including the flag indicating presence of user data.
	 */
	memset(sess->sess_data, 0,
			rte_cryptodev_sym_session_data_size(sess));

	rte_cryptodev_trace_sym_session_create(mp, sess);
	return sess;
}
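
/*
 * Illustrative usage sketch (not part of this file): allocating a session
 * header and binding it to one device. sess_mp and sess_priv_mp are the
 * assumed header and private-data mempools, xform a prepared transform.
 *
 *	struct rte_cryptodev_sym_session *sess;
 *
 *	sess = rte_cryptodev_sym_session_create(sess_mp);
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *			sess_priv_mp) < 0)
 *		printf("session setup failed\n");
 */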
1914 
1915 int
1916 rte_cryptodev_asym_session_create(uint8_t dev_id,
1917 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
1918 		void **session)
1919 {
1920 	struct rte_cryptodev_asym_session *sess;
1921 	uint32_t session_priv_data_sz;
1922 	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
1923 	unsigned int session_header_size =
1924 			rte_cryptodev_asym_get_header_session_size();
1925 	struct rte_cryptodev *dev;
1926 	int ret;
1927 
1928 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1929 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1930 		return -EINVAL;
1931 	}
1932 
1933 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1934 
1935 	if (dev == NULL)
1936 		return -EINVAL;
1937 
1938 	if (!mp) {
1939 		CDEV_LOG_ERR("invalid mempool\n");
1940 		return -EINVAL;
1941 	}
1942 
1943 	session_priv_data_sz = rte_cryptodev_asym_get_private_session_size(
1944 			dev_id);
1945 	pool_priv = rte_mempool_get_priv(mp);
1946 
1947 	if (pool_priv->max_priv_session_sz < session_priv_data_sz) {
1948 		CDEV_LOG_DEBUG(
1949 			"The private session data size used when creating the mempool is smaller than this device's private session data.");
1950 		return -EINVAL;
1951 	}
1952 
1953 	/* Verify if provided mempool can hold elements big enough. */
1954 	if (mp->elt_size < session_header_size + session_priv_data_sz) {
1955 		CDEV_LOG_ERR(
1956 			"mempool elements too small to hold session objects");
1957 		return -EINVAL;
1958 	}
1959 
1960 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure, -ENOTSUP);
1961 
1962 	/* Allocate a session structure from the session pool */
1963 	if (rte_mempool_get(mp, session)) {
1964 		CDEV_LOG_ERR("couldn't get object from session mempool");
1965 		return -ENOMEM;
1966 	}
1967 
1968 	sess = *session;
1969 	sess->driver_id = dev->driver_id;
1970 	sess->user_data_sz = pool_priv->user_data_sz;
1971 	sess->max_priv_data_sz = pool_priv->max_priv_session_sz;
1972 
1973 	/* Clear the device private session data and the user data area. */
1974 	memset(sess->sess_private_data, 0, session_priv_data_sz + sess->user_data_sz);
1975 
1976 	ret = dev->dev_ops->asym_session_configure(dev, xforms, sess);
1977 	if (ret < 0) {
1978 		CDEV_LOG_ERR("dev_id %d failed to configure session details",
1979 				dev_id);
1980 		/* Put the unconfigured session back to its mempool. */
1981 		rte_mempool_put(mp, *session);
1982 		*session = NULL;
1983 		return ret;
1984 	}
1985 
1986 	rte_cryptodev_trace_asym_session_create(dev_id, xforms, mp, sess);
1987 	return 0;
1988 }
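
/*
 * Usage sketch (illustrative; dev_id, the xform contents and the pool
 * parameters are application-provided placeholders), assuming the pool
 * comes from rte_cryptodev_asym_session_pool_create():
 *
 *	struct rte_mempool *pool = rte_cryptodev_asym_session_pool_create(
 *			"asym_sess_pool", 128, 0, 0, rte_socket_id());
 *	void *sess = NULL;
 *	int rc = rte_cryptodev_asym_session_create(dev_id, &xform,
 *			pool, &sess);
 *
 * A negative rc means no session was created; on success the session is
 * released later with rte_cryptodev_asym_session_free(dev_id, sess).
 */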
1989 
1990 int
1991 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1992 		struct rte_cryptodev_sym_session *sess)
1993 {
1994 	struct rte_cryptodev *dev;
1995 	uint8_t driver_id;
1996 
1997 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1998 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1999 		return -EINVAL;
2000 	}
2001 
2002 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2003 
2004 	if (dev == NULL || sess == NULL)
2005 		return -EINVAL;
2006 
2007 	driver_id = dev->driver_id;
2008 	if (sess->sess_data[driver_id].refcnt == 0)
2009 		return 0;
2010 	if (--sess->sess_data[driver_id].refcnt != 0)
2011 		return -EBUSY;
2012 
2013 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);
2014 
2015 	dev->dev_ops->sym_session_clear(dev, sess);
2016 
2017 	rte_cryptodev_trace_sym_session_clear(dev_id, sess);
2018 	return 0;
2019 }
2020 
2021 int
2022 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
2023 {
2024 	uint8_t i;
2025 	struct rte_mempool *sess_mp;
2026 
2027 	if (sess == NULL)
2028 		return -EINVAL;
2029 
2030 	/* Check that all device private data has been freed */
2031 	for (i = 0; i < sess->nb_drivers; i++) {
2032 		if (sess->sess_data[i].refcnt != 0)
2033 			return -EBUSY;
2034 	}
2035 
2036 	/* Return session to mempool */
2037 	sess_mp = rte_mempool_from_obj(sess);
2038 	rte_mempool_put(sess_mp, sess);
2039 
2040 	rte_cryptodev_trace_sym_session_free(sess);
2041 	return 0;
2042 }
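
/*
 * Teardown order matters: each device the session was initialised on
 * (via rte_cryptodev_sym_session_init() elsewhere in this file) must be
 * cleared first, otherwise a non-zero per-driver refcnt makes the free
 * fail with -EBUSY. Minimal sketch:
 *
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	rte_cryptodev_sym_session_free(sess);
 */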
2043 
2044 int
2045 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
2046 {
2047 	struct rte_mempool *sess_mp;
2048 	struct rte_cryptodev *dev;
2049 
2050 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2051 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2052 		return -EINVAL;
2053 	}
2054 
2055 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2056 
2057 	if (dev == NULL || sess == NULL)
2058 		return -EINVAL;
2059 
2060 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);
2061 
2062 	dev->dev_ops->asym_session_clear(dev, sess);
2063 
2064 	/* Return session to mempool */
2065 	sess_mp = rte_mempool_from_obj(sess);
2066 	rte_mempool_put(sess_mp, sess);
2067 
2068 	rte_cryptodev_trace_asym_session_free(dev_id, sess);
2069 	return 0;
2070 }
2071 
2072 unsigned int
2073 rte_cryptodev_sym_get_header_session_size(void)
2074 {
2075 	/*
2076 	 * The header contains pointers to the private data of all registered
2077 	 * drivers, plus all the information needed to safely clear or
2078 	 * free the session.
2079 	 */
2080 	struct rte_cryptodev_sym_session s = {0};
2081 
2082 	s.nb_drivers = nb_drivers;
2083 
2084 	return (unsigned int)(sizeof(s) +
2085 			rte_cryptodev_sym_session_data_size(&s));
2086 }
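
/*
 * For illustration: the zeroed local session above has user_data_sz == 0,
 * so the returned header size works out to
 *
 *	sizeof(struct rte_cryptodev_sym_session)
 *		+ nb_drivers * sizeof(sess_data[0])
 *
 * A pool element must additionally hold the pool's user_data_sz, which
 * is what rte_cryptodev_sym_is_valid_session_pool() checks.
 */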
2087 
2088 unsigned int
2089 rte_cryptodev_sym_get_existing_header_session_size(
2090 		struct rte_cryptodev_sym_session *sess)
2091 {
2092 	if (!sess)
2093 		return 0;
2094 
2095 	return (unsigned int)(sizeof(*sess) +
2096 			rte_cryptodev_sym_session_data_size(sess));
2097 }
2098 
2099 unsigned int
2100 rte_cryptodev_asym_get_header_session_size(void)
2101 {
2102 	return sizeof(struct rte_cryptodev_asym_session);
2103 }
2104 
2105 unsigned int
2106 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
2107 {
2108 	struct rte_cryptodev *dev;
2109 	unsigned int priv_sess_size;
2110 
2111 	if (!rte_cryptodev_is_valid_dev(dev_id))
2112 		return 0;
2113 
2114 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2115 
2116 	if (*dev->dev_ops->sym_session_get_size == NULL)
2117 		return 0;
2118 
2119 	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
2120 
2121 	return priv_sess_size;
2122 }
2123 
2124 unsigned int
2125 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
2126 {
2127 	struct rte_cryptodev *dev;
2128 	unsigned int priv_sess_size;
2129 
2130 	if (!rte_cryptodev_is_valid_dev(dev_id))
2131 		return 0;
2132 
2133 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2134 
2135 	if (*dev->dev_ops->asym_session_get_size == NULL)
2136 		return 0;
2137 
2138 	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
2139 
2140 	return priv_sess_size;
2141 }
2142 
2143 int
2144 rte_cryptodev_sym_session_set_user_data(
2145 					struct rte_cryptodev_sym_session *sess,
2146 					void *data,
2147 					uint16_t size)
2148 {
2149 	if (sess == NULL)
2150 		return -EINVAL;
2151 
2152 	if (sess->user_data_sz < size)
2153 		return -ENOMEM;
2154 
2155 	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
2156 	return 0;
2157 }
2158 
2159 void *
2160 rte_cryptodev_sym_session_get_user_data(
2161 					struct rte_cryptodev_sym_session *sess)
2162 {
2163 	if (sess == NULL || sess->user_data_sz == 0)
2164 		return NULL;
2165 
2166 	return (void *)(sess->sess_data + sess->nb_drivers);
2167 }
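
/*
 * Sketch of the user-data round trip (assumes the session pool was
 * created with a non-zero user data size; app_ctx is a hypothetical
 * application type):
 *
 *	struct app_ctx { uint32_t seq; } ctx = { 1 };
 *	int rc = rte_cryptodev_sym_session_set_user_data(sess, &ctx,
 *			sizeof(ctx));
 *	struct app_ctx *p = rte_cryptodev_sym_session_get_user_data(sess);
 *
 * On success (rc == 0), p points at the copy of ctx stored after the
 * per-driver session data.
 */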
2168 
2169 int
2170 rte_cryptodev_asym_session_set_user_data(void *session, void *data, uint16_t size)
2171 {
2172 	struct rte_cryptodev_asym_session *sess = session;
2173 	if (sess == NULL)
2174 		return -EINVAL;
2175 
2176 	if (sess->user_data_sz < size)
2177 		return -ENOMEM;
2178 
2179 	rte_memcpy(sess->sess_private_data +
2180 			sess->max_priv_data_sz,
2181 			data, size);
2182 	return 0;
2183 }
2184 
2185 void *
2186 rte_cryptodev_asym_session_get_user_data(void *session)
2187 {
2188 	struct rte_cryptodev_asym_session *sess = session;
2189 	if (sess == NULL || sess->user_data_sz == 0)
2190 		return NULL;
2191 
2192 	return (void *)(sess->sess_private_data +
2193 			sess->max_priv_data_sz);
2194 }
2195 
2196 static inline void
2197 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
2198 {
2199 	uint32_t i;
2200 	for (i = 0; i < vec->num; i++)
2201 		vec->status[i] = errnum;
2202 }
2203 
2204 uint32_t
2205 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
2206 	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
2207 	struct rte_crypto_sym_vec *vec)
2208 {
2209 	struct rte_cryptodev *dev;
2210 
2211 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2212 		sym_crypto_fill_status(vec, EINVAL);
2213 		return 0;
2214 	}
2215 
2216 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2217 
2218 	if (*dev->dev_ops->sym_cpu_process == NULL ||
2219 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
2220 		sym_crypto_fill_status(vec, ENOTSUP);
2221 		return 0;
2222 	}
2223 
2224 	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
2225 }
2226 
2227 int
2228 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
2229 {
2230 	struct rte_cryptodev *dev;
2231 	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
2232 	int32_t priv_size;
2233 
2234 	if (!rte_cryptodev_is_valid_dev(dev_id))
2235 		return -EINVAL;
2236 
2237 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2238 
2239 	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
2240 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
2241 		return -ENOTSUP;
2242 	}
2243 
2244 	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
2245 	if (priv_size < 0)
2246 		return -ENOTSUP;
2247 
2248 	return RTE_ALIGN_CEIL((size + priv_size), 8);
2249 }
2250 
2251 int
2252 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
2253 	struct rte_crypto_raw_dp_ctx *ctx,
2254 	enum rte_crypto_op_sess_type sess_type,
2255 	union rte_cryptodev_session_ctx session_ctx,
2256 	uint8_t is_update)
2257 {
2258 	struct rte_cryptodev *dev;
2259 
2260 	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
2261 		return -EINVAL;
2262 
2263 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2264 	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
2265 			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
2266 		return -ENOTSUP;
2267 
2268 	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
2269 			sess_type, session_ctx, is_update);
2270 }
2271 
2272 uint32_t
2273 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2274 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
2275 	void **user_data, int *enqueue_status)
2276 {
2277 	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
2278 			ofs, user_data, enqueue_status);
2279 }
2280 
2281 int
2282 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
2283 		uint32_t n)
2284 {
2285 	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2286 }
2287 
2288 uint32_t
2289 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2290 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
2291 	uint32_t max_nb_to_dequeue,
2292 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
2293 	void **out_user_data, uint8_t is_user_data_array,
2294 	uint32_t *n_success_jobs, int *status)
2295 {
2296 	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
2297 		get_dequeue_count, max_nb_to_dequeue, post_dequeue,
2298 		out_user_data, is_user_data_array, n_success_jobs, status);
2299 }
2300 
2301 int
2302 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
2303 		uint32_t n)
2304 {
2305 	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2306 }
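
/*
 * Sketch of the raw data-path call order (error handling omitted;
 * dev_id, qp_id, sess, vec, ofs, udata and st are application-provided):
 *
 *	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, sz, 8);
 *	union rte_cryptodev_session_ctx sc = { .crypto_sess = sess };
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sc, 0);
 *	uint32_t n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs,
 *			udata, &st);
 *	rte_cryptodev_raw_enqueue_done(ctx, n);
 *
 * Dequeue mirrors this: rte_cryptodev_raw_dequeue_burst() followed by
 * rte_cryptodev_raw_dequeue_done().
 */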
2307 
2308 /** Initialise rte_crypto_op mempool element */
2309 static void
2310 rte_crypto_op_init(struct rte_mempool *mempool,
2311 		void *opaque_arg,
2312 		void *_op_data,
2313 		__rte_unused unsigned int i)
2314 {
2315 	struct rte_crypto_op *op = _op_data;
2316 	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
2317 
2318 	memset(_op_data, 0, mempool->elt_size);
2319 
2320 	__rte_crypto_op_reset(op, type);
2321 
2322 	op->phys_addr = rte_mem_virt2iova(_op_data);
2323 	op->mempool = mempool;
2324 }
2325 
2327 struct rte_mempool *
2328 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
2329 		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
2330 		int socket_id)
2331 {
2332 	struct rte_crypto_op_pool_private *priv;
2333 
2334 	unsigned int elt_size = sizeof(struct rte_crypto_op) + priv_size;
2336 
2337 	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
2338 		elt_size += sizeof(struct rte_crypto_sym_op);
2339 	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2340 		elt_size += sizeof(struct rte_crypto_asym_op);
2341 	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2342 		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
2343 		                    sizeof(struct rte_crypto_asym_op));
2344 	} else {
2345 		CDEV_LOG_ERR("Invalid op_type");
2346 		return NULL;
2347 	}
2348 
2349 	/* lookup mempool in case already allocated */
2350 	struct rte_mempool *mp = rte_mempool_lookup(name);
2351 
2352 	if (mp != NULL) {
2353 		priv = (struct rte_crypto_op_pool_private *)
2354 				rte_mempool_get_priv(mp);
2355 
2356 		if (mp->elt_size != elt_size ||
2357 				mp->cache_size < cache_size ||
2358 				mp->size < nb_elts ||
2359 				priv->priv_size < priv_size) {
2360 			CDEV_LOG_ERR("Mempool %s already exists but with "
2361 					"incompatible parameters", name);
2362 			return NULL;
2364 		}
2365 		return mp;
2366 	}
2367 
2368 	mp = rte_mempool_create(
2369 			name,
2370 			nb_elts,
2371 			elt_size,
2372 			cache_size,
2373 			sizeof(struct rte_crypto_op_pool_private),
2374 			NULL,
2375 			NULL,
2376 			rte_crypto_op_init,
2377 			&type,
2378 			socket_id,
2379 			0);
2380 
2381 	if (mp == NULL) {
2382 		CDEV_LOG_ERR("Failed to create mempool %s", name);
2383 		return NULL;
2384 	}
2385 
2386 	priv = (struct rte_crypto_op_pool_private *)
2387 			rte_mempool_get_priv(mp);
2388 
2389 	priv->priv_size = priv_size;
2390 	priv->type = type;
2391 
2392 	return mp;
2393 }
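
/*
 * Usage sketch (pool name and sizes are placeholders):
 *
 *	struct rte_mempool *op_pool = rte_crypto_op_pool_create(
 *			"op_pool", RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			8192, 128, 0, rte_socket_id());
 *	struct rte_crypto_op *op = rte_crypto_op_alloc(op_pool,
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 *
 * and, once the operation has completed, rte_crypto_op_free(op) returns
 * it to the pool.
 */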
2394 
2395 int
2396 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
2397 {
2398 	struct rte_cryptodev *dev = NULL;
2399 	uint32_t i = 0;
2400 
2401 	if (name == NULL)
2402 		return -EINVAL;
2403 
2404 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
2405 		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
2406 				"%s_%u", dev_name_prefix, i);
2407 
2408 		if (ret < 0)
2409 			return ret;
2410 
2411 		dev = rte_cryptodev_pmd_get_named_dev(name);
2412 		if (!dev)
2413 			return 0;
2414 	}
2415 
2416 	return -1;
2417 }
2418 
2419 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
2420 
2421 static struct cryptodev_driver_list cryptodev_driver_list =
2422 	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
2423 
2424 int
2425 rte_cryptodev_driver_id_get(const char *name)
2426 {
2427 	struct cryptodev_driver *driver;
2428 	const char *driver_name;
2429 
2430 	if (name == NULL) {
2431 		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
2432 		return -1;
2433 	}
2434 
2435 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2436 		driver_name = driver->driver->name;
2437 		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
2438 			return driver->id;
2439 	}
2440 	return -1;
2441 }
2442 
2443 const char *
2444 rte_cryptodev_name_get(uint8_t dev_id)
2445 {
2446 	struct rte_cryptodev *dev;
2447 
2448 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
2449 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2450 		return NULL;
2451 	}
2452 
2453 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2454 	if (dev == NULL)
2455 		return NULL;
2456 
2457 	return dev->data->name;
2458 }
2459 
2460 const char *
2461 rte_cryptodev_driver_name_get(uint8_t driver_id)
2462 {
2463 	struct cryptodev_driver *driver;
2464 
2465 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
2466 		if (driver->id == driver_id)
2467 			return driver->driver->name;
2468 	return NULL;
2469 }
2470 
2471 uint8_t
2472 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
2473 		const struct rte_driver *drv)
2474 {
2475 	crypto_drv->driver = drv;
2476 	crypto_drv->id = nb_drivers;
2477 
2478 	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
2479 
2480 	return nb_drivers++;
2481 }
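
/*
 * PMDs normally do not call rte_cryptodev_allocate_driver() directly;
 * they use the RTE_PMD_REGISTER_CRYPTO_DRIVER() constructor macro from
 * cryptodev_pmd.h, roughly as below (identifiers hypothetical):
 *
 *	static struct cryptodev_driver my_crypto_drv;
 *	static uint8_t my_driver_id;
 *	RTE_PMD_REGISTER_CRYPTO_DRIVER(my_crypto_drv,
 *			my_vdev_drv.driver, my_driver_id);
 */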
2482 
2483 RTE_INIT(cryptodev_init_fp_ops)
2484 {
2485 	uint32_t i;
2486 
2487 	for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
2488 		cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
2489 }
2490 
2491 static int
2492 cryptodev_handle_dev_list(const char *cmd __rte_unused,
2493 		const char *params __rte_unused,
2494 		struct rte_tel_data *d)
2495 {
2496 	int dev_id;
2497 
2498 	if (rte_cryptodev_count() < 1)
2499 		return -EINVAL;
2500 
2501 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
2502 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
2503 		if (rte_cryptodev_is_valid_dev(dev_id))
2504 			rte_tel_data_add_array_int(d, dev_id);
2505 
2506 	return 0;
2507 }
2508 
2509 static int
2510 cryptodev_handle_dev_info(const char *cmd __rte_unused,
2511 		const char *params, struct rte_tel_data *d)
2512 {
2513 	struct rte_cryptodev_info cryptodev_info;
2514 	int dev_id;
2515 	char *end_param;
2516 
2517 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2518 		return -EINVAL;
2519 
2520 	dev_id = strtoul(params, &end_param, 0);
2521 	if (*end_param != '\0')
2522 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2523 	if (!rte_cryptodev_is_valid_dev(dev_id))
2524 		return -EINVAL;
2525 
2526 	rte_cryptodev_info_get(dev_id, &cryptodev_info);
2527 
2528 	rte_tel_data_start_dict(d);
2529 	rte_tel_data_add_dict_string(d, "device_name",
2530 		cryptodev_info.device->name);
2531 	rte_tel_data_add_dict_int(d, "max_nb_queue_pairs",
2532 		cryptodev_info.max_nb_queue_pairs);
2533 
2534 	return 0;
2535 }
2536 
2537 #define ADD_DICT_STAT(s) rte_tel_data_add_dict_u64(d, #s, cryptodev_stats.s)
2538 
2539 static int
2540 cryptodev_handle_dev_stats(const char *cmd __rte_unused,
2541 		const char *params,
2542 		struct rte_tel_data *d)
2543 {
2544 	struct rte_cryptodev_stats cryptodev_stats;
2545 	int dev_id, ret;
2546 	char *end_param;
2547 
2548 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2549 		return -EINVAL;
2550 
2551 	dev_id = strtoul(params, &end_param, 0);
2552 	if (*end_param != '\0')
2553 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2554 	if (!rte_cryptodev_is_valid_dev(dev_id))
2555 		return -EINVAL;
2556 
2557 	ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats);
2558 	if (ret < 0)
2559 		return ret;
2560 
2561 	rte_tel_data_start_dict(d);
2562 	ADD_DICT_STAT(enqueued_count);
2563 	ADD_DICT_STAT(dequeued_count);
2564 	ADD_DICT_STAT(enqueue_err_count);
2565 	ADD_DICT_STAT(dequeue_err_count);
2566 
2567 	return 0;
2568 }
2569 
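/*
 * Number of 64-bit words needed to carry one rte_cryptodev_capabilities
 * entry over telemetry: the structure size rounded up to a multiple of
 * sizeof(uint64_t), then divided by sizeof(uint64_t).
 */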
2570 #define CRYPTO_CAPS_SZ                                             \
2571 	(RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \
2572 					sizeof(uint64_t)) /        \
2573 	 sizeof(uint64_t))
2574 
2575 static int
2576 crypto_caps_array(struct rte_tel_data *d,
2577 		  const struct rte_cryptodev_capabilities *capabilities)
2578 {
2579 	const struct rte_cryptodev_capabilities *dev_caps;
2580 	uint64_t caps_val[CRYPTO_CAPS_SZ];
2581 	unsigned int i = 0, j;
2582 
2583 	rte_tel_data_start_array(d, RTE_TEL_U64_VAL);
2584 
2585 	while ((dev_caps = &capabilities[i++])->op !=
2586 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2587 		memset(caps_val, 0, sizeof(caps_val));
2588 		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
2589 		for (j = 0; j < CRYPTO_CAPS_SZ; j++)
2590 			rte_tel_data_add_array_u64(d, caps_val[j]);
2591 	}
2592 
2593 	return i - 1; /* exclude the UNDEFINED terminator from the count */
2594 }
2595 
2596 static int
2597 cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params,
2598 			  struct rte_tel_data *d)
2599 {
2600 	struct rte_cryptodev_info dev_info;
2601 	struct rte_tel_data *crypto_caps;
2602 	int crypto_caps_n;
2603 	char *end_param;
2604 	int dev_id;
2605 
2606 	if (!params || strlen(params) == 0 || !isdigit(*params))
2607 		return -EINVAL;
2608 
2609 	dev_id = strtoul(params, &end_param, 0);
2610 	if (*end_param != '\0')
2611 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2612 	if (!rte_cryptodev_is_valid_dev(dev_id))
2613 		return -EINVAL;
2614 
2615 	rte_tel_data_start_dict(d);
2616 	crypto_caps = rte_tel_data_alloc();
2617 	if (!crypto_caps)
2618 		return -ENOMEM;
2619 
2620 	rte_cryptodev_info_get(dev_id, &dev_info);
2621 	crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities);
2622 	rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0);
2623 	rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n);
2624 
2625 	return 0;
2626 }
2627 
2628 RTE_INIT(cryptodev_init_telemetry)
2629 {
2630 	rte_telemetry_register_cmd("/cryptodev/info", cryptodev_handle_dev_info,
2631 			"Returns information for a cryptodev. Parameters: int dev_id");
2632 	rte_telemetry_register_cmd("/cryptodev/list",
2633 			cryptodev_handle_dev_list,
2634 			"Returns list of available crypto devices by IDs. No parameters.");
2635 	rte_telemetry_register_cmd("/cryptodev/stats",
2636 			cryptodev_handle_dev_stats,
2637 			"Returns the stats for a cryptodev. Parameters: int dev_id");
2638 	rte_telemetry_register_cmd("/cryptodev/caps",
2639 			cryptodev_handle_dev_caps,
2640 			"Returns the capabilities for a cryptodev. Parameters: int dev_id");
2641 }
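
/*
 * These endpoints can be exercised with the usertools/dpdk-telemetry.py
 * client, e.g. (assuming a device with dev_id 0 exists):
 *
 *	--> /cryptodev/list
 *	--> /cryptodev/stats,0
 *
 * Each command returns a JSON dictionary keyed by the command name.
 */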
2642