/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "rte_cryptodev_pmd.h"
#include "rte_cryptodev_trace.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0
};

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * the pointer to the callback's parameters, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * String identifiers for the crypto cipher algorithms.
 * They can be used in an application's command line.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};

/**
 * String identifiers for the crypto cipher operations.
 * They can be used in an application's command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
	[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * String identifiers for the crypto auth algorithms.
 * They can be used in an application's command line.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};

/**
 * String identifiers for the crypto AEAD algorithms.
 * They can be used in an application's command line.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * String identifiers for the crypto AEAD operations.
 * They can be used in an application's command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * String identifiers for the asymmetric crypto transform operations.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};

/**
 * String identifiers for the asymmetric crypto operations.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]	= "priv_key_generate",
	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
};

/**
 * The private data structure stored in the session mempool private data.
 */
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t nb_drivers;
	/**< number of elements in sess_data array */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
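/*
 * Usage sketch (illustrative, not part of the original file): mapping a
 * command-line string to its cipher enum. The string literal is just an
 * example value from the table above.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") < 0)
 *		printf("unknown cipher algorithm\n");
 */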

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
			rte_crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

/**
 * String identifiers for the crypto auth operations.
 * They can be used in an application's command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
	[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}
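/*
 * Usage sketch (illustrative, assumes a valid dev_id): querying whether a
 * device exposes AES-CBC as a cipher transform.
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &idx);
 *
 *	if (cap == NULL)
 *		printf("AES-CBC not supported by this device\n");
 */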

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type)
			return &capability->asym.xform_capa;
	}
	return NULL;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->auth.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->aead.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
		return -1;

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		return -1;

	return 0;
}
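/*
 * Usage sketch (illustrative): validating AES-GCM parameter sizes against a
 * capability previously returned by rte_cryptodev_sym_capability_get(); cap
 * is assumed to be such a pointer. Sizes are in bytes: 16-byte key, 16-byte
 * digest, no AAD, 12-byte IV.
 *
 *	if (rte_cryptodev_sym_capability_check_aead(cap, 16, 16, 0, 12) != 0)
 *		printf("AES-GCM parameter combination not supported\n");
 */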

int
rte_cryptodev_asym_xform_capability_check_optype(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type)
{
	if (capability->op_types & (1 << op_type))
		return 1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	uint16_t modlen)
{
	/* a min or max of 0 means no limit on that side */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min)
			return -1;
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max)
			return -1;
	}

	/* in any case, check that the given modlen is a multiple of the increment */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			return -1;
	}

	return 0;
}

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
		return "SYM_CPU_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
		return "ASYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
		return "SYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
		return "NON_BYTE_ALIGNED_DATA";
	default:
		return NULL;
	}
}
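/*
 * Usage sketch (illustrative): printing the name of every feature flag a
 * device advertises. dev_info is assumed to have been filled in beforehand
 * by rte_cryptodev_info_get().
 *
 *	uint64_t flag;
 *
 *	for (flag = 1; flag != 0; flag <<= 1) {
 *		const char *name = rte_cryptodev_get_feature_name(flag);
 *
 *		if ((dev_info.feature_flags & flag) && name != NULL)
 *			printf("%s\n", name);
 *	}
 */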

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
			rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}

unsigned int
rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (!rte_cryptodev_is_valid_device_data(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;
	}

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}
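/*
 * Usage sketch (illustrative): collecting the ids of all devices bound to a
 * given driver. The driver name is an example value.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t n = rte_cryptodev_devices_get("crypto_aesni_mb", ids,
 *			RTE_CRYPTO_MAX_DEVS);
 *
 *	while (n--)
 *		printf("dev %u\n", ids[n]);
 */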

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
			(rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY))
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}

static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
				mz_name, mz);
	} else {
		mz = rte_memzone_lookup(mz_name);
		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
				mz_name, mz);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
				mz_name, mz);
		return rte_memzone_free(mz);
	} else {
		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
				mz_name, mz);
	}

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
			CDEV_LOG_DEBUG("PRIMARY:init data");
		}

		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
				cryptodev->data->name,
				cryptodev->data->dev_id,
				cryptodev->data->socket_id,
				cryptodev->data->dev_started);

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return 0;
	}

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			CDEV_LOG_ERR("failed to realloc qp meta data,"
						" nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}

		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}
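/*
 * Usage sketch (illustrative): minimal configure/start sequence for one
 * device. Error handling is elided; the queue-pair count is an example
 * value and dev_id is assumed valid.
 *
 *	struct rte_cryptodev_config conf = {
 *		.nb_queue_pairs = 2,
 *		.socket_id = rte_socket_id(),
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) == 0)
 *		rte_cryptodev_start(dev_id);
 */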

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	rte_cryptodev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);
	rte_cryptodev_trace_close(dev_id, retval);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}
	void **qps = dev->data->queue_pairs;

	if (qps[queue_pair_id])	{
		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
			queue_pair_id, dev_id);
		return 1;
	}

	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
		queue_pair_id, dev_id);

	return 0;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL\n");
		return -EINVAL;
	}

	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
		CDEV_LOG_ERR("Invalid mempools\n");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
		uint32_t obj_size = qp_conf->mp_session->elt_size;
		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
		struct rte_cryptodev_sym_session s = {0};

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}

		s.nb_drivers = pool_priv->nb_drivers;
		s.user_data_sz = pool_priv->user_data_sz;

		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
			obj_size) || (s.nb_drivers <= dev->driver_id) ||
			rte_cryptodev_sym_get_private_session_size(dev_id) >
				obj_priv_size) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
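/*
 * Usage sketch (illustrative): setting up queue pair 0 after the device has
 * been configured. sess_mp and sess_priv_mp are assumed to be session
 * mempools created beforehand (e.g. the former with
 * rte_cryptodev_sym_session_pool_create()); the descriptor count is an
 * example value.
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *
 *	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_socket_id()) < 0)
 *		printf("queue pair setup failed\n");
 */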

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}

int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}

int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
			dev_id);
	uint8_t index;
	int ret;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL || mp == NULL)
		return -EINVAL;

	if (mp->elt_size < sess_priv_sz)
		return -EINVAL;

	index = dev->driver_id;
	if (index >= sess->nb_drivers)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);

	if (sess->sess_data[index].refcnt == 0) {
		ret = dev->dev_ops->sym_session_configure(dev, xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
	sess->sess_data[index].refcnt++;
	return 0;
}
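/*
 * Usage sketch (illustrative): allocating a session header from a session
 * mempool and binding device private data to it. sess_mp, sess_priv_mp and
 * the xform chain are assumed to exist already.
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *			sess_priv_mp) < 0)
 *		printf("session setup failed\n");
 */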

int
rte_cryptodev_asym_session_init(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess,
		struct rte_crypto_asym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint8_t index;
	int ret;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	index = dev->driver_id;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
				-ENOTSUP);

	if (sess->sess_private_data[index] == NULL) {
		ret = dev->dev_ops->asym_session_configure(dev,
							xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_asym_session_init(dev_id, sess, xforms, mp);
	return 0;
}

struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
	if (obj_sz > elt_size)
		CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
				obj_sz);
	else
		obj_sz = elt_size;

	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	pool_priv->nb_drivers = nb_drivers;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
		elt_size, cache_size, user_data_size, mp);
	return mp;
}
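/*
 * Usage sketch (illustrative): creating a session-header mempool. All
 * numeric arguments are example values; an elt_size of 0 lets the function
 * expand it to the computed header size, as logged above.
 *
 *	struct rte_mempool *sess_mp =
 *		rte_cryptodev_sym_session_pool_create("sess_mp", 1024, 0,
 *			32, 0, rte_socket_id());
 */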

static unsigned int
rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
{
	return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
			sess->user_data_sz;
}

static uint8_t
rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp)
		return 0;

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
			pool_priv->nb_drivers != nb_drivers ||
			mp->elt_size <
				rte_cryptodev_sym_get_header_session_size()
				+ pool_priv->user_data_sz)
		return 0;

	return 1;
}

struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!rte_cryptodev_sym_is_valid_session_pool(mp)) {
		CDEV_LOG_ERR("Invalid mempool\n");
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	sess->nb_drivers = pool_priv->nb_drivers;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->opaque_data = 0;

	/* Clear device session pointer.
	 * Include the flag indicating presence of user data
	 */
	memset(sess->sess_data, 0,
			rte_cryptodev_sym_session_data_size(sess));

	rte_cryptodev_trace_sym_session_create(mp, sess);
	return sess;
}

struct rte_cryptodev_asym_session *
rte_cryptodev_asym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_asym_session *sess;
	unsigned int session_size =
			rte_cryptodev_asym_get_header_session_size();

	if (!mp) {
		CDEV_LOG_ERR("invalid mempool\n");
		return NULL;
	}

	/* Verify that the provided mempool holds elements big enough. */
	if (mp->elt_size < session_size) {
		CDEV_LOG_ERR(
			"mempool elements too small to hold session objects");
		return NULL;
	}

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	/* Clear device session pointer.
	 * Include the flag indicating presence of private data
	 */
	memset(sess, 0, session_size);

	rte_cryptodev_trace_asym_session_create(mp, sess);
	return sess;
}

int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;
	uint8_t driver_id;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	driver_id = dev->driver_id;
	if (sess->sess_data[driver_id].refcnt == 0)
		return 0;
	if (--sess->sess_data[driver_id].refcnt != 0)
		return -EBUSY;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);

	dev->dev_ops->sym_session_clear(dev, sess);

	rte_cryptodev_trace_sym_session_clear(dev_id, sess);
	return 0;
}

int
rte_cryptodev_asym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);

	dev->dev_ops->asym_session_clear(dev, sess);

	rte_cryptodev_trace_asym_session_clear(dev_id, sess);
	return 0;
}

int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
{
	uint8_t i;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < sess->nb_drivers; i++) {
		if (sess->sess_data[i].refcnt != 0)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_sym_session_free(sess);
	return 0;
}

int
rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
{
	uint8_t i;
	void *sess_priv;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < nb_drivers; i++) {
		sess_priv = get_asym_session_private_data(sess, i);
		if (sess_priv != NULL)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_asym_session_free(sess);
	return 0;
}

unsigned int
rte_cryptodev_sym_get_header_session_size(void)
{
	/*
	 * The header contains pointers to the private data of all registered
	 * drivers, plus the information needed to safely clear or free the
	 * session.
	 */
	struct rte_cryptodev_sym_session s = {0};

	s.nb_drivers = nb_drivers;

	return (unsigned int)(sizeof(s) +
			rte_cryptodev_sym_session_data_size(&s));
}

unsigned int
rte_cryptodev_sym_get_existing_header_session_size(
		struct rte_cryptodev_sym_session *sess)
{
	if (!sess)
		return 0;
	else
		return (unsigned int)(sizeof(*sess) +
				rte_cryptodev_sym_session_data_size(sess));
}

unsigned int
rte_cryptodev_asym_get_header_session_size(void)
{
	/*
	 * The header contains pointers to the private data of all registered
	 * drivers, and a flag which indicates presence of private data.
	 */
	return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
}

unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);

	return priv_sess_size;
}

unsigned int
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int header_size = sizeof(void *) * nb_drivers;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->asym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
	if (priv_sess_size < header_size)
		return header_size;

	return priv_sess_size;
}

int
rte_cryptodev_sym_session_set_user_data(
					struct rte_cryptodev_sym_session *sess,
					void *data,
					uint16_t size)
{
	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
	return 0;
}

void *
rte_cryptodev_sym_session_get_user_data(
					struct rte_cryptodev_sym_session *sess)
{
	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	return (void *)(sess->sess_data + sess->nb_drivers);
}

static inline void
sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
{
	uint32_t i;

	for (i = 0; i < vec->num; i++)
		vec->status[i] = errnum;
}

uint32_t
rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_sym_vec *vec)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		sym_crypto_fill_status(vec, EINVAL);
		return 0;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_cpu_process == NULL ||
		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
		sym_crypto_fill_status(vec, ENOTSUP);
		return 0;
	}

	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
}

int
rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
	int32_t priv_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return -EINVAL;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
		return -ENOTSUP;
	}

	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
	if (priv_size < 0)
		return -ENOTSUP;

	return RTE_ALIGN_CEIL((size + priv_size), 8);
}

int
rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx,
	uint8_t is_update)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
		return -EINVAL;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
			sess_type, session_ctx, is_update);
}

uint32_t
rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void **user_data, int *enqueue_status)
{
	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
			ofs, user_data, enqueue_status);
}

int
rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
		uint32_t n)
{
	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
}

uint32_t
rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success_jobs, int *status)
{
	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
		get_dequeue_count, post_dequeue, out_user_data,
		is_user_data_array, n_success_jobs, status);
}

int
rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
		uint32_t n)
{
	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
}

/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned int i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}

struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned int nb_elts, unsigned int cache_size,
		uint16_t priv_size, int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned int elt_size = sizeof(struct rte_crypto_op) +
			priv_size;

	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_sym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_asym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				sizeof(struct rte_crypto_asym_op));
	} else {
		CDEV_LOG_ERR("Invalid op_type\n");
		return NULL;
	}

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			mp = NULL;
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;
	priv->type = type;

	return mp;
}
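/*
 * Usage sketch (illustrative): creating a pool of symmetric crypto ops and
 * drawing a burst from it with rte_crypto_op_bulk_alloc(). Counts and sizes
 * are example values.
 *
 *	struct rte_crypto_op *ops[32];
 *	struct rte_mempool *op_mp = rte_crypto_op_pool_create("op_mp",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 4096, 128, 0,
 *			rte_socket_id());
 *
 *	if (op_mp == NULL || rte_crypto_op_bulk_alloc(op_mp,
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops, 32) == 0)
 *		printf("op allocation failed\n");
 */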

int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	uint32_t i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}

TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);

int
rte_cryptodev_driver_id_get(const char *name)
{
	struct cryptodev_driver *driver;
	const char *driver_name;

	if (name == NULL) {
		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL");
		return -1;
	}

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		driver_name = driver->driver->name;
		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
			return driver->id;
	}
	return -1;
}

const char *
rte_cryptodev_name_get(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return NULL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev == NULL)
		return NULL;

	return dev->data->name;
}

const char *
rte_cryptodev_driver_name_get(uint8_t driver_id)
{
	struct cryptodev_driver *driver;

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
		if (driver->id == driver_id)
			return driver->driver->name;
	return NULL;
}

uint8_t
rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
		const struct rte_driver *drv)
{
	crypto_drv->driver = drv;
	crypto_drv->id = nb_drivers;

	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);

	return nb_drivers++;
}