/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2017 Intel Corporation
 */

#include <intel-ipsec-mb.h>

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_per_lcore.h>
#include <rte_ether.h>

#include "aesni_mb_pmd_private.h"

#define AES_CCM_DIGEST_MIN_LEN 4
#define AES_CCM_DIGEST_MAX_LEN 16
#define HMAC_MAX_BLOCK_SIZE 128
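/* SHA-384/SHA-512 use a 128-byte block, the largest among the supported HMAC hashes */
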
static uint8_t cryptodev_driver_id;

/*
 * Needed to support CPU-CRYPTO API (rte_cryptodev_sym_cpu_crypto_process),
 * as we still use JOB based API even for synchronous processing.
 */
static RTE_DEFINE_PER_LCORE(MB_MGR *, sync_mb_mgr);

typedef void (*hash_one_block_t)(const void *data, void *digest);
typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);

/**
 * Calculate the authentication pre-computes
 *
 * @param one_block_hash	Function pointer to calculate digest on ipad/opad
 * @param ipad			Inner pad output byte array
 * @param opad			Outer pad output byte array
 * @param hkey			Authentication key
 * @param hkey_len		Authentication key length
 * @param blocksize		Block size of selected hash algo
 */
static void
calculate_auth_precomputes(hash_one_block_t one_block_hash,
		uint8_t *ipad, uint8_t *opad,
		const uint8_t *hkey, uint16_t hkey_len,
		uint16_t blocksize)
{
	unsigned i, length;

	uint8_t ipad_buf[blocksize] __rte_aligned(16);
	uint8_t opad_buf[blocksize] __rte_aligned(16);
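	/*
	 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)) (RFC 2104).
	 * The two single-block hashes below pre-compute the inner and outer
	 * key-pad digests once per session, so the data path can resume each
	 * HMAC from these partial hashes instead of re-hashing the key for
	 * every packet.
	 */
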
	/* Setup inner and outer pads */
	memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
	memset(opad_buf, HMAC_OPAD_VALUE, blocksize);

	/* XOR hash key with inner and outer pads */
	length = hkey_len > blocksize ? blocksize : hkey_len;

	for (i = 0; i < length; i++) {
		ipad_buf[i] ^= hkey[i];
		opad_buf[i] ^= hkey[i];
	}

	/* Compute partial hashes */
	(*one_block_hash)(ipad_buf, ipad);
	(*one_block_hash)(opad_buf, opad);

	/* Clean up stack */
	memset(ipad_buf, 0, blocksize);
	memset(opad_buf, 0, blocksize);
}

/** Get xform chain order */
static enum aesni_mb_operation
aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
	if (xform == NULL)
		return AESNI_MB_OP_NOT_SUPPORTED;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return AESNI_MB_OP_CIPHER_ONLY;
		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return AESNI_MB_OP_CIPHER_HASH;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return AESNI_MB_OP_HASH_ONLY;
		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return AESNI_MB_OP_HASH_CIPHER;
	}
#if IMB_VERSION_NUM > IMB_VERSION(0, 52, 0)
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
			/*
			 * CCM requires hashing first and ciphering later
			 * when encrypting
			 */
			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
				return AESNI_MB_OP_AEAD_HASH_CIPHER;
			else
				return AESNI_MB_OP_AEAD_CIPHER_HASH;
		} else {
			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
				return AESNI_MB_OP_AEAD_CIPHER_HASH;
			else
				return AESNI_MB_OP_AEAD_HASH_CIPHER;
		}
	}
#else
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM ||
				xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
			if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
				return AESNI_MB_OP_AEAD_CIPHER_HASH;
			else
				return AESNI_MB_OP_AEAD_HASH_CIPHER;
		}
	}
#endif

	return AESNI_MB_OP_NOT_SUPPORTED;
}

static inline int
is_aead_algo(JOB_HASH_ALG hash_alg, JOB_CIPHER_MODE cipher_mode)
{
#if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
	return (hash_alg == IMB_AUTH_CHACHA20_POLY1305 || hash_alg == AES_CCM ||
		(hash_alg == AES_GMAC && cipher_mode == GCM));
#else
	return ((hash_alg == AES_GMAC && cipher_mode == GCM) ||
		hash_alg == AES_CCM);
#endif
}

/** Set session authentication parameters */
static int
aesni_mb_set_session_auth_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	hash_one_block_t hash_oneblock_fn = NULL;
	unsigned int key_larger_block_size = 0;
	uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
	uint32_t auth_precompute = 1;

	if (xform == NULL) {
		sess->auth.algo = NULL_HASH;
		return 0;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
		AESNI_MB_LOG(ERR, "Crypto xform struct not of type auth");
		return -1;
	}

	/* Set IV parameters */
	sess->auth_iv.offset = xform->auth.iv.offset;
	sess->auth_iv.length = xform->auth.iv.length;

	/* Set the request digest size */
	sess->auth.req_digest_len = xform->auth.digest_length;

	/* Select auth generate/verify */
	sess->auth.operation = xform->auth.op;

	/* Set Authentication Parameters */
	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
		sess->auth.algo = AES_XCBC;

		uint16_t xcbc_mac_digest_len =
			get_truncated_digest_byte_length(AES_XCBC);
		if (sess->auth.req_digest_len != xcbc_mac_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

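		/* Derive the K1 (expanded), K2 and K3 subkeys from the
		 * authentication key, as defined for AES-XCBC-MAC (RFC 3566).
		 */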
		IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
				sess->auth.xcbc.k1_expanded,
				sess->auth.xcbc.k2, sess->auth.xcbc.k3);
		return 0;
	}

	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
		uint32_t dust[4*15];

		sess->auth.algo = AES_CMAC;

		uint16_t cmac_digest_len = get_digest_byte_length(AES_CMAC);

		if (sess->auth.req_digest_len > cmac_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		/*
		 * The multi-buffer lib supports digest sizes from 4 to 16
		 * bytes in version 0.50, and sizes of 12 and 16 bytes in
		 * version 0.49.
		 * If the requested size is different, generate the full
		 * digest (16 bytes) in a temporary location and then memcpy
		 * the requested number of bytes.
		 */
		if (sess->auth.req_digest_len < 4)
			sess->auth.gen_digest_len = cmac_digest_len;
		else
			sess->auth.gen_digest_len = sess->auth.req_digest_len;

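		/* Expand the key, then derive the two CMAC subkeys applied to
		 * the final message block (NIST SP 800-38B).
		 */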
		IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data,
				sess->auth.cmac.expkey, dust);
		IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey,
				sess->auth.cmac.skey1, sess->auth.cmac.skey2);
		return 0;
	}

	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			sess->cipher.direction = ENCRYPT;
			sess->chain_order = CIPHER_HASH;
		} else
			sess->cipher.direction = DECRYPT;

		sess->auth.algo = AES_GMAC;
		if (sess->auth.req_digest_len > get_digest_byte_length(AES_GMAC)) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->auth.gen_digest_len = sess->auth.req_digest_len;
		sess->iv.length = xform->auth.iv.length;
		sess->iv.offset = xform->auth.iv.offset;

		switch (xform->auth.key.length) {
		case AES_128_BYTES:
			IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data,
				&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			break;
		case AES_192_BYTES:
			IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data,
				&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = AES_192_BYTES;
			break;
		case AES_256_BYTES:
			IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data,
				&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			break;
		default:
			AESNI_MB_LOG(ERR, "Invalid authentication key length");
			return -EINVAL;
		}

		return 0;
	}

#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	if (xform->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		sess->auth.algo = IMB_AUTH_ZUC_EIA3_BITLEN;
		uint16_t zuc_eia3_digest_len =
			get_truncated_digest_byte_length(IMB_AUTH_ZUC_EIA3_BITLEN);
		if (sess->auth.req_digest_len != zuc_eia3_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		memcpy(sess->auth.zuc_auth_key, xform->auth.key.data, 16);
		return 0;
	} else if (xform->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
		sess->auth.algo = IMB_AUTH_SNOW3G_UIA2_BITLEN;
		uint16_t snow3g_uia2_digest_len =
			get_truncated_digest_byte_length(IMB_AUTH_SNOW3G_UIA2_BITLEN);
		if (sess->auth.req_digest_len != snow3g_uia2_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->auth.key.data,
					&sess->auth.pKeySched_snow3g_auth);
		return 0;
	} else if (xform->auth.algo == RTE_CRYPTO_AUTH_KASUMI_F9) {
		sess->auth.algo = IMB_AUTH_KASUMI_UIA1;
		uint16_t kasumi_f9_digest_len =
			get_truncated_digest_byte_length(IMB_AUTH_KASUMI_UIA1);
		if (sess->auth.req_digest_len != kasumi_f9_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_KASUMI_INIT_F9_KEY_SCHED(mb_mgr, xform->auth.key.data,
					&sess->auth.pKeySched_kasumi_auth);
		return 0;
	}
#endif

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		sess->auth.algo = MD5;
		hash_oneblock_fn = mb_mgr->md5_one_block;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		sess->auth.algo = SHA1;
		hash_oneblock_fn = mb_mgr->sha1_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA1)) {
			IMB_SHA1(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA1:
		sess->auth.algo = PLAIN_SHA1;
		auth_precompute = 0;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		sess->auth.algo = SHA_224;
		hash_oneblock_fn = mb_mgr->sha224_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_224)) {
			IMB_SHA224(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA224:
		sess->auth.algo = PLAIN_SHA_224;
		auth_precompute = 0;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		sess->auth.algo = SHA_256;
		hash_oneblock_fn = mb_mgr->sha256_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_256)) {
			IMB_SHA256(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA256:
		sess->auth.algo = PLAIN_SHA_256;
		auth_precompute = 0;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		sess->auth.algo = SHA_384;
		hash_oneblock_fn = mb_mgr->sha384_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_384)) {
			IMB_SHA384(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA384:
		sess->auth.algo = PLAIN_SHA_384;
		auth_precompute = 0;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		sess->auth.algo = SHA_512;
		hash_oneblock_fn = mb_mgr->sha512_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_512)) {
			IMB_SHA512(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA512:
		sess->auth.algo = PLAIN_SHA_512;
		auth_precompute = 0;
		break;
	default:
		AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection");
		return -ENOTSUP;
	}
	uint16_t trunc_digest_size =
			get_truncated_digest_byte_length(sess->auth.algo);
	uint16_t full_digest_size =
			get_digest_byte_length(sess->auth.algo);

	if (sess->auth.req_digest_len > full_digest_size ||
			sess->auth.req_digest_len == 0) {
		AESNI_MB_LOG(ERR, "Invalid digest size\n");
		return -EINVAL;
	}

	if (sess->auth.req_digest_len != trunc_digest_size &&
			sess->auth.req_digest_len != full_digest_size)
		sess->auth.gen_digest_len = full_digest_size;
	else
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

	/* Plain SHA does not require a key precompute */
	if (auth_precompute == 0)
		return 0;

	/* Calculate Authentication precomputes */
	if (key_larger_block_size) {
		calculate_auth_precomputes(hash_oneblock_fn,
			sess->auth.pads.inner, sess->auth.pads.outer,
			hashed_key,
			xform->auth.key.length,
			get_auth_algo_blocksize(sess->auth.algo));
	} else {
		calculate_auth_precomputes(hash_oneblock_fn,
			sess->auth.pads.inner, sess->auth.pads.outer,
			xform->auth.key.data,
			xform->auth.key.length,
			get_auth_algo_blocksize(sess->auth.algo));
	}

	return 0;
}

/** Set session cipher parameters */
static int
aesni_mb_set_session_cipher_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	uint8_t is_aes = 0;
	uint8_t is_3DES = 0;
	uint8_t is_docsis = 0;
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	uint8_t is_zuc = 0;
	uint8_t is_snow3g = 0;
	uint8_t is_kasumi = 0;
#endif

	if (xform == NULL) {
		sess->cipher.mode = NULL_CIPHER;
		return 0;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
		AESNI_MB_LOG(ERR, "Crypto xform struct not of type cipher");
		return -EINVAL;
	}

	/* Select cipher direction */
	switch (xform->cipher.op) {
	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
		sess->cipher.direction = ENCRYPT;
		break;
	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
		sess->cipher.direction = DECRYPT;
		break;
	default:
		AESNI_MB_LOG(ERR, "Invalid cipher operation parameter");
		return -EINVAL;
	}

	/* Select cipher mode */
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		sess->cipher.mode = CBC;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		sess->cipher.mode = CNTR;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		sess->cipher.mode = DOCSIS_SEC_BPI;
		is_docsis = 1;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		sess->cipher.mode = DES;
		break;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		sess->cipher.mode = DOCSIS_DES;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		sess->cipher.mode = DES3;
		is_3DES = 1;
		break;
#if IMB_VERSION(0, 53, 0) <= IMB_VERSION_NUM
	case RTE_CRYPTO_CIPHER_AES_ECB:
		sess->cipher.mode = ECB;
		is_aes = 1;
		break;
#endif
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		sess->cipher.mode = IMB_CIPHER_ZUC_EEA3;
		is_zuc = 1;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		sess->cipher.mode = IMB_CIPHER_SNOW3G_UEA2_BITLEN;
		is_snow3g = 1;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		sess->cipher.mode = IMB_CIPHER_KASUMI_UEA1_BITLEN;
		is_kasumi = 1;
		break;
#endif
	default:
		AESNI_MB_LOG(ERR, "Unsupported cipher mode parameter");
		return -ENOTSUP;
	}

	/* Set IV parameters */
	sess->iv.offset = xform->cipher.iv.offset;
	sess->iv.length = xform->cipher.iv.length;

	/* Check key length and choose key expansion function for AES */
	if (is_aes) {
		switch (xform->cipher.key.length) {
		case AES_128_BYTES:
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case AES_192_BYTES:
			sess->cipher.key_length_in_bytes = AES_192_BYTES;
			IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case AES_256_BYTES:
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		default:
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
	} else if (is_docsis) {
		switch (xform->cipher.key.length) {
		case AES_128_BYTES:
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
		case AES_256_BYTES:
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
#endif
		default:
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
	} else if (is_3DES) {
		uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
				sess->cipher.exp_3des_keys.key[1],
				sess->cipher.exp_3des_keys.key[2]};

		switch (xform->cipher.key.length) {
		case 24:
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);
			IMB_DES_KEYSCHED(mb_mgr, keys[1],
					xform->cipher.key.data + 8);
			IMB_DES_KEYSCHED(mb_mgr, keys[2],
					xform->cipher.key.data + 16);

			/* Initialize keys - 24 bytes: [K1-K2-K3] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
			break;
		case 16:
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);
			IMB_DES_KEYSCHED(mb_mgr, keys[1],
					xform->cipher.key.data + 8);
			/* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
			break;
		case 8:
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);

			/* Initialize keys - 8 bytes: [K1 = K2 = K3] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
			break;
		default:
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}

		sess->cipher.key_length_in_bytes = 24;
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	} else if (is_zuc) {
		if (xform->cipher.key.length != 16) {
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 16;
		memcpy(sess->cipher.zuc_cipher_key, xform->cipher.key.data,
			16);
	} else if (is_snow3g) {
		if (xform->cipher.key.length != 16) {
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 16;
		IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->cipher.key.data,
					&sess->cipher.pKeySched_snow3g_cipher);
	} else if (is_kasumi) {
		if (xform->cipher.key.length != 16) {
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 16;
		IMB_KASUMI_INIT_F8_KEY_SCHED(mb_mgr, xform->cipher.key.data,
					&sess->cipher.pKeySched_kasumi_cipher);
#endif
	} else {
		if (xform->cipher.key.length != 8) {
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 8;

		IMB_DES_KEYSCHED(mb_mgr,
			(uint64_t *)sess->cipher.expanded_aes_keys.encode,
				xform->cipher.key.data);
		IMB_DES_KEYSCHED(mb_mgr,
			(uint64_t *)sess->cipher.expanded_aes_keys.decode,
				xform->cipher.key.data);
	}

	return 0;
}

static int
aesni_mb_set_session_aead_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	switch (xform->aead.op) {
	case RTE_CRYPTO_AEAD_OP_ENCRYPT:
		sess->cipher.direction = ENCRYPT;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
		break;
	case RTE_CRYPTO_AEAD_OP_DECRYPT:
		sess->cipher.direction = DECRYPT;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
		break;
	default:
		AESNI_MB_LOG(ERR, "Invalid aead operation parameter");
		return -EINVAL;
	}

	/* Set IV parameters */
	sess->iv.offset = xform->aead.iv.offset;
	sess->iv.length = xform->aead.iv.length;

	/* Set digest sizes */
	sess->auth.req_digest_len = xform->aead.digest_length;
	sess->auth.gen_digest_len = sess->auth.req_digest_len;

	switch (xform->aead.algo) {
	case RTE_CRYPTO_AEAD_AES_CCM:
		sess->cipher.mode = CCM;
		sess->auth.algo = AES_CCM;

		/* Check key length and choose key expansion function for AES */
		switch (xform->aead.key.length) {
		case AES_128_BYTES:
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case AES_256_BYTES:
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->aead.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		default:
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}

		/* CCM digest size must be between 4 and 16 bytes and even */
		if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
				sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
				(sess->auth.req_digest_len & 1) == 1) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		break;

	case RTE_CRYPTO_AEAD_AES_GCM:
		sess->cipher.mode = GCM;
		sess->auth.algo = AES_GMAC;

		switch (xform->aead.key.length) {
		case AES_128_BYTES:
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		case AES_192_BYTES:
			sess->cipher.key_length_in_bytes = AES_192_BYTES;
			IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		case AES_256_BYTES:
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		default:
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}

		/* GCM digest size must be between 1 and 16 */
		if (sess->auth.req_digest_len == 0 ||
				sess->auth.req_digest_len > 16) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		break;

#if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
		sess->cipher.mode = IMB_CIPHER_CHACHA20_POLY1305;
		sess->auth.algo = IMB_AUTH_CHACHA20_POLY1305;

		if (xform->aead.key.length != 32) {
			AESNI_MB_LOG(ERR, "Invalid key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 32;
		memcpy(sess->cipher.expanded_aes_keys.encode,
			xform->aead.key.data, 32);
		if (sess->auth.req_digest_len != 16) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		break;
#endif
	default:
		AESNI_MB_LOG(ERR, "Unsupported aead mode parameter");
		return -ENOTSUP;
	}

	return 0;
}

/** Parse crypto xform chain and set private session parameters */
int
aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	const struct rte_crypto_sym_xform *aead_xform = NULL;
	int ret;

	/* Select Crypto operation - hash then cipher / cipher then hash */
	switch (aesni_mb_get_chain_order(xform)) {
	case AESNI_MB_OP_HASH_CIPHER:
		sess->chain_order = HASH_CIPHER;
		auth_xform = xform;
		cipher_xform = xform->next;
		break;
	case AESNI_MB_OP_CIPHER_HASH:
		sess->chain_order = CIPHER_HASH;
		auth_xform = xform->next;
		cipher_xform = xform;
		break;
	case AESNI_MB_OP_HASH_ONLY:
		sess->chain_order = HASH_CIPHER;
		auth_xform = xform;
		cipher_xform = NULL;
		break;
	case AESNI_MB_OP_CIPHER_ONLY:
		/*
		 * The multi-buffer library operates in only two modes,
		 * CIPHER_HASH and HASH_CIPHER. When doing cipher only,
		 * the chain order depends on the cipher operation:
		 * encryption is always the first operation and decryption
		 * the last one.
		 */
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			sess->chain_order = CIPHER_HASH;
		else
			sess->chain_order = HASH_CIPHER;
		auth_xform = NULL;
		cipher_xform = xform;
		break;
	case AESNI_MB_OP_AEAD_CIPHER_HASH:
		sess->chain_order = CIPHER_HASH;
		sess->aead.aad_len = xform->aead.aad_length;
		aead_xform = xform;
		break;
	case AESNI_MB_OP_AEAD_HASH_CIPHER:
		sess->chain_order = HASH_CIPHER;
		sess->aead.aad_len = xform->aead.aad_length;
		aead_xform = xform;
		break;
	case AESNI_MB_OP_NOT_SUPPORTED:
	default:
		AESNI_MB_LOG(ERR, "Unsupported operation chain order parameter");
		return -ENOTSUP;
	}

	/* Default IV length = 0 */
	sess->iv.length = 0;
	sess->auth_iv.length = 0;

	ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform);
	if (ret != 0) {
		AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters");
		return ret;
	}

	ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess,
			cipher_xform);
	if (ret != 0) {
		AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
		return ret;
	}

	if (aead_xform) {
		ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess,
				aead_xform);
		if (ret != 0) {
			AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters");
			return ret;
		}
	}

	return 0;
}

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
/** Check DOCSIS security session configuration is valid */
static int
check_docsis_sec_session(struct rte_security_session_conf *conf)
{
	struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
	struct rte_security_docsis_xform *docsis = &conf->docsis;

	/* Downlink: CRC generate -> Cipher encrypt */
	if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {

		if (crypto_sym != NULL &&
		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
		    crypto_sym->cipher.algo ==
					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
		    (crypto_sym->cipher.key.length == IMB_KEY_AES_128_BYTES ||
		     crypto_sym->cipher.key.length == IMB_KEY_AES_256_BYTES) &&
		    crypto_sym->cipher.iv.length == AES_BLOCK_SIZE &&
		    crypto_sym->next == NULL) {
			return 0;
		}
	/* Uplink: Cipher decrypt -> CRC verify */
	} else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {

		if (crypto_sym != NULL &&
		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
		    crypto_sym->cipher.algo ==
					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
		    (crypto_sym->cipher.key.length == IMB_KEY_AES_128_BYTES ||
		     crypto_sym->cipher.key.length == IMB_KEY_AES_256_BYTES) &&
		    crypto_sym->cipher.iv.length == AES_BLOCK_SIZE &&
		    crypto_sym->next == NULL) {
			return 0;
		}
	}

	return -EINVAL;
}

/** Set DOCSIS security session auth (CRC) parameters */
static int
aesni_mb_set_docsis_sec_session_auth_parameters(struct aesni_mb_session *sess,
		struct rte_security_docsis_xform *xform)
{
	if (xform == NULL) {
		AESNI_MB_LOG(ERR, "Invalid DOCSIS xform");
		return -EINVAL;
	}

	/* Select CRC generate/verify */
	if (xform->direction == RTE_SECURITY_DOCSIS_UPLINK) {
		sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
	} else if (xform->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
		sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
	} else {
		AESNI_MB_LOG(ERR, "Unsupported DOCSIS direction");
		return -ENOTSUP;
	}

	sess->auth.req_digest_len = RTE_ETHER_CRC_LEN;
	sess->auth.gen_digest_len = RTE_ETHER_CRC_LEN;

	return 0;
}

/**
 * Parse DOCSIS security session configuration and set private session
 * parameters
 */
int
aesni_mb_set_docsis_sec_session_parameters(
		struct rte_cryptodev *dev,
		struct rte_security_session_conf *conf,
		void *sess)
{
	struct rte_security_docsis_xform *docsis_xform;
	struct rte_crypto_sym_xform *cipher_xform;
	struct aesni_mb_session *aesni_sess = sess;
	struct aesni_mb_private *internals = dev->data->dev_private;
	int ret;

	ret = check_docsis_sec_session(conf);
	if (ret) {
		AESNI_MB_LOG(ERR, "Unsupported DOCSIS security configuration");
		return ret;
	}

	switch (conf->docsis.direction) {
	case RTE_SECURITY_DOCSIS_UPLINK:
		aesni_sess->chain_order = IMB_ORDER_CIPHER_HASH;
		docsis_xform = &conf->docsis;
		cipher_xform = conf->crypto_xform;
		break;
	case RTE_SECURITY_DOCSIS_DOWNLINK:
		aesni_sess->chain_order = IMB_ORDER_HASH_CIPHER;
		cipher_xform = conf->crypto_xform;
		docsis_xform = &conf->docsis;
		break;
	default:
		return -EINVAL;
	}

	/* Default IV length = 0 */
	aesni_sess->iv.length = 0;

	ret = aesni_mb_set_docsis_sec_session_auth_parameters(aesni_sess,
			docsis_xform);
	if (ret != 0) {
		AESNI_MB_LOG(ERR, "Invalid/unsupported DOCSIS parameters");
		return -EINVAL;
	}

	ret = aesni_mb_set_session_cipher_parameters(internals->mb_mgr,
			aesni_sess, cipher_xform);

	if (ret != 0) {
		AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
		return -EINVAL;
	}

	return 0;
}
#endif

/**
 * Burst enqueue: place crypto operations on the ingress queue for processing.
 *
 * @param __qp         Queue Pair to process
 * @param ops          Crypto operations for processing
 * @param nb_ops       Number of crypto operations for processing
 *
 * @return
 * - Number of crypto operations enqueued
 */
static uint16_t
aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct aesni_mb_qp *qp = __qp;

	unsigned int nb_enqueued;

	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
			(void **)ops, nb_ops, NULL);

	qp->stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

/** Get multi buffer session */
static inline struct aesni_mb_session *
get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
	struct aesni_mb_session *sess = NULL;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (likely(op->sym->session != NULL))
			sess = (struct aesni_mb_session *)
					get_sym_session_private_data(
					op->sym->session,
					cryptodev_driver_id);
#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
	} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		if (likely(op->sym->sec_session != NULL))
			sess = (struct aesni_mb_session *)
					get_sec_session_private_data(
						op->sym->sec_session);
#endif
	} else {
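		/*
		 * Session-less operation: take a session object and a private
		 * data area from the queue pair mempools and derive the
		 * session parameters from the op's xform chain. Both objects
		 * are returned to the mempools in post_process_mb_job() once
		 * the op completes.
		 */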
		void *_sess = rte_cryptodev_sym_session_create(qp->sess_mp);
		void *_sess_private_data = NULL;

		if (_sess == NULL)
			return NULL;

		if (rte_mempool_get(qp->sess_mp_priv,
				(void **)&_sess_private_data))
			return NULL;

		sess = (struct aesni_mb_session *)_sess_private_data;

		if (unlikely(aesni_mb_set_session_parameters(qp->mb_mgr,
				sess, op->sym->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
			sess = NULL;
		}
		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
		set_sym_session_private_data(op->sym->session,
				cryptodev_driver_id, _sess_private_data);
	}

	if (unlikely(sess == NULL))
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

	return sess;
}

static inline uint64_t
auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
		uint32_t oop)
{
	struct rte_mbuf *m_src, *m_dst;
	uint8_t *p_src, *p_dst;
	uintptr_t u_src, u_dst;
	uint32_t cipher_end, auth_end;

	/* Only cipher then hash needs special calculation. */
	if (!oop || session->chain_order != CIPHER_HASH)
		return op->sym->auth.data.offset;

	m_src = op->sym->m_src;
	m_dst = op->sym->m_dst;

	p_src = rte_pktmbuf_mtod(m_src, uint8_t *);
	p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
	u_src = (uintptr_t)p_src;
	u_dst = (uintptr_t)p_dst + op->sym->auth.data.offset;

	/**
	 * Copy the content between cipher offset and auth offset for generating
	 * correct digest.
	 */
	if (op->sym->cipher.data.offset > op->sym->auth.data.offset)
		memcpy(p_dst + op->sym->auth.data.offset,
				p_src + op->sym->auth.data.offset,
				op->sym->cipher.data.offset -
				op->sym->auth.data.offset);

	/**
	 * Copy the content between (cipher offset + length) and (auth offset +
	 * length) for generating correct digest
	 */
	cipher_end = op->sym->cipher.data.offset + op->sym->cipher.data.length;
	auth_end = op->sym->auth.data.offset + op->sym->auth.data.length;
	if (cipher_end < auth_end)
		memcpy(p_dst + cipher_end, p_src + cipher_end,
				auth_end - cipher_end);

	/**
	 * intel-ipsec-mb supports only a single, positive hash offset relative
	 * to job->src, so express the src-to-dst distance as an unsigned
	 * (mod 2^64) value. E.g. if dst sits 16 bytes below src, the value
	 * returned is 2^64 - 16, and src + (2^64 - 16) wraps around to dst
	 * under 64-bit arithmetic.
	 */

	return u_src < u_dst ? (u_dst - u_src) :
			(UINT64_MAX - u_src + u_dst + 1);
}

static inline void
set_cpu_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_session *session,
		union rte_crypto_sym_ofs sofs, void *buf, uint32_t len,
		struct rte_crypto_va_iova_ptr *iv,
		struct rte_crypto_va_iova_ptr *aad, void *digest, void *udata)
{
	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;
	job->iv = iv->va;

	switch (job->hash_alg) {
	case AES_XCBC:
		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
		job->u.XCBC._k2 = session->auth.xcbc.k2;
		job->u.XCBC._k3 = session->auth.xcbc.k3;

		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_CCM:
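		/*
		 * Per the DPDK symmetric crypto API convention for AES-CCM
		 * (see rte_crypto_sym.h), the op AAD buffer reserves the first
		 * 18 bytes for the B0 block, so the actual AAD starts at
		 * offset 18; likewise the first IV byte is reserved, so
		 * job->iv is advanced by one below to point at the nonce.
		 */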
		job->u.CCM.aad = (uint8_t *)aad->va + 18;
		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		job->iv++;
		break;

	case AES_CMAC:
		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
		job->u.CMAC._skey1 = session->auth.cmac.skey1;
		job->u.CMAC._skey2 = session->auth.cmac.skey2;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_GMAC:
		if (session->cipher.mode == GCM) {
			job->u.GCM.aad = aad->va;
			job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
		} else {
			/* For GMAC */
			job->u.GCM.aad = buf;
			job->u.GCM.aad_len_in_bytes = len;
			job->cipher_mode = GCM;
		}
		job->aes_enc_key_expanded = &session->cipher.gcm_key;
		job->aes_dec_key_expanded = &session->cipher.gcm_key;
		break;

#if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
	case IMB_AUTH_CHACHA20_POLY1305:
		job->u.CHACHA20_POLY1305.aad = aad->va;
		job->u.CHACHA20_POLY1305.aad_len_in_bytes = session->aead.aad_len;
		job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.encode;
		break;
#endif
	default:
		job->u.HMAC._hashed_auth_key_xor_ipad =
				session->auth.pads.inner;
		job->u.HMAC._hashed_auth_key_xor_opad =
				session->auth.pads.outer;

		if (job->cipher_mode == DES3) {
			job->aes_enc_key_expanded =
				session->cipher.exp_3des_keys.ks_ptr;
			job->aes_dec_key_expanded =
				session->cipher.exp_3des_keys.ks_ptr;
		} else {
			job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
			job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		}
	}

	/*
	 * The multi-buffer library currently only supports returning a
	 * truncated digest length as specified in the relevant IPsec RFCs.
	 */

	/* Set digest location and length */
	job->auth_tag_output = digest;
	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;

	/* Set IV parameters */
	job->iv_len_in_bytes = session->iv.length;

	/* Data Parameters */
	job->src = buf;
	job->dst = (uint8_t *)buf + sofs.ofs.cipher.head;
	job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head;
	job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head;
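	/*
	 * For plain GMAC the whole buffer was attached above as AAD, so
	 * nothing is left to cipher or hash as message data.
	 */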
	if (job->hash_alg == AES_GMAC && session->cipher.mode != GCM) {
		job->msg_len_to_hash_in_bytes = 0;
		job->msg_len_to_cipher_in_bytes = 0;
	} else {
		job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head -
			sofs.ofs.auth.tail;
		job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head -
			sofs.ofs.cipher.tail;
	}

	job->user_data = udata;
}

/**
 * Process a crypto operation and complete a JOB_AES_HMAC job structure for
 * submission to the multi buffer library for processing.
 *
 * @param	job		JOB_AES_HMAC structure to fill
 * @param	qp		queue pair
 * @param	op		crypto operation to process
 * @param	digest_idx	index into the queue pair's temporary digests
 *
 * @return
 * - 0 on success, with the job structure completed
 * - -1 if the operation's session is invalid
 */
static inline int
set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
		struct rte_crypto_op *op, uint8_t *digest_idx)
{
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
	struct aesni_mb_session *session;
	uint32_t m_offset, oop;

	session = get_session(qp, op);
	if (session == NULL) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -1;
	}

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;

	const int aead = is_aead_algo(job->hash_alg, job->cipher_mode);

	switch (job->hash_alg) {
	case AES_XCBC:
		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
		job->u.XCBC._k2 = session->auth.xcbc.k2;
		job->u.XCBC._k3 = session->auth.xcbc.k3;

		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_CCM:
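		/* The actual AAD starts 18 bytes into the op AAD buffer; see
		 * the note on the DPDK CCM convention in
		 * set_cpu_mb_job_params() above.
		 */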
		job->u.CCM.aad = op->sym->aead.aad.data + 18;
		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_CMAC:
		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
		job->u.CMAC._skey1 = session->auth.cmac.skey1;
		job->u.CMAC._skey2 = session->auth.cmac.skey2;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_GMAC:
		if (session->cipher.mode == GCM) {
			job->u.GCM.aad = op->sym->aead.aad.data;
			job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
		} else {
			/* For GMAC */
			job->u.GCM.aad = rte_pktmbuf_mtod_offset(m_src,
					uint8_t *, op->sym->auth.data.offset);
			job->u.GCM.aad_len_in_bytes = op->sym->auth.data.length;
			job->cipher_mode = GCM;
		}
		job->aes_enc_key_expanded = &session->cipher.gcm_key;
		job->aes_dec_key_expanded = &session->cipher.gcm_key;
		break;
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	case IMB_AUTH_ZUC_EIA3_BITLEN:
		job->u.ZUC_EIA3._key = session->auth.zuc_auth_key;
		job->u.ZUC_EIA3._iv = rte_crypto_op_ctod_offset(op, uint8_t *,
						session->auth_iv.offset);
		break;
	case IMB_AUTH_SNOW3G_UIA2_BITLEN:
		job->u.SNOW3G_UIA2._key = (void *) &session->auth.pKeySched_snow3g_auth;
		job->u.SNOW3G_UIA2._iv = rte_crypto_op_ctod_offset(op, uint8_t *,
						session->auth_iv.offset);
		break;
	case IMB_AUTH_KASUMI_UIA1:
		job->u.KASUMI_UIA1._key = (void *) &session->auth.pKeySched_kasumi_auth;
		break;
#endif
#if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
	case IMB_AUTH_CHACHA20_POLY1305:
		job->u.CHACHA20_POLY1305.aad = op->sym->aead.aad.data;
		job->u.CHACHA20_POLY1305.aad_len_in_bytes = session->aead.aad_len;
		job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.encode;
		break;
#endif
	default:
		job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
		job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;

		if (job->cipher_mode == DES3) {
			job->aes_enc_key_expanded =
				session->cipher.exp_3des_keys.ks_ptr;
			job->aes_dec_key_expanded =
				session->cipher.exp_3des_keys.ks_ptr;
		} else {
			job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
			job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		}
	}

	if (aead)
		m_offset = op->sym->aead.data.offset;
	else
		m_offset = op->sym->cipher.data.offset;

#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3) {
		job->aes_enc_key_expanded = session->cipher.zuc_cipher_key;
		job->aes_dec_key_expanded = session->cipher.zuc_cipher_key;
	} else if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) {
		job->enc_keys = &session->cipher.pKeySched_snow3g_cipher;
		m_offset = 0;
	} else if (job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) {
		job->enc_keys = &session->cipher.pKeySched_kasumi_cipher;
		m_offset = 0;
	}
#endif

	if (!op->sym->m_dst) {
		/* in-place operation */
		m_dst = m_src;
		oop = 0;
	} else if (op->sym->m_dst == op->sym->m_src) {
		/* in-place operation */
		m_dst = m_src;
		oop = 0;
	} else {
		/* out-of-place operation */
		m_dst = op->sym->m_dst;
		oop = 1;
	}

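	/*
	 * For verify operations the digest is generated into a per-queue-pair
	 * scratch buffer and compared against the op's digest during
	 * post-processing, so the received digest is never overwritten. The
	 * scratch buffer is also used whenever the library generates a longer
	 * digest than the application requested, with the truncated copy made
	 * afterwards in generate_digest().
	 */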
	/* Set digest output location */
	if (job->hash_alg != NULL_HASH &&
			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
		job->auth_tag_output = qp->temp_digests[*digest_idx];
		*digest_idx = (*digest_idx + 1) % MAX_JOBS;
	} else {
		if (aead)
			job->auth_tag_output = op->sym->aead.digest.data;
		else
			job->auth_tag_output = op->sym->auth.digest.data;

		if (session->auth.req_digest_len != session->auth.gen_digest_len) {
			job->auth_tag_output = qp->temp_digests[*digest_idx];
			*digest_idx = (*digest_idx + 1) % MAX_JOBS;
		}
	}
	/*
	 * The multi-buffer library currently only supports returning a
	 * truncated digest length as specified in the relevant IPsec RFCs.
	 */

	/* Set digest length */
	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;

	/* Set IV parameters */
	job->iv_len_in_bytes = session->iv.length;

	/* Data Parameters */
	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);

	switch (job->hash_alg) {
	case AES_CCM:
		job->cipher_start_src_offset_in_bytes =
				op->sym->aead.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
		job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;

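		/* The CCM nonce is expected one byte into the op IV field
		 * (the first byte is reserved by the API convention), hence
		 * the +1 below.
		 */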
		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset + 1);
		break;

	case AES_GMAC:
		if (session->cipher.mode == GCM) {
			job->cipher_start_src_offset_in_bytes =
					op->sym->aead.data.offset;
			job->hash_start_src_offset_in_bytes =
					op->sym->aead.data.offset;
			job->msg_len_to_cipher_in_bytes =
					op->sym->aead.data.length;
			job->msg_len_to_hash_in_bytes =
					op->sym->aead.data.length;
		} else {
			job->cipher_start_src_offset_in_bytes =
					op->sym->auth.data.offset;
			job->hash_start_src_offset_in_bytes =
					op->sym->auth.data.offset;
			job->msg_len_to_cipher_in_bytes = 0;
			job->msg_len_to_hash_in_bytes = 0;
		}

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
				session->iv.offset);
		break;

#if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
	case IMB_AUTH_CHACHA20_POLY1305:
		job->cipher_start_src_offset_in_bytes = op->sym->aead.data.offset;
		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
		job->msg_len_to_cipher_in_bytes =
				op->sym->aead.data.length;
		job->msg_len_to_hash_in_bytes =
					op->sym->aead.data.length;

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
				session->iv.offset);
		break;
#endif
	default:
		/* For SNOW3G, length and offsets are already in bits */
		job->cipher_start_src_offset_in_bytes =
				op->sym->cipher.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;

		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
				session, oop);
		job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);
	}

#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3)
		job->msg_len_to_cipher_in_bytes >>= 3;
	else if (job->hash_alg == IMB_AUTH_KASUMI_UIA1)
		job->msg_len_to_hash_in_bytes >>= 3;
#endif

	/* Set user data to be crypto operation data struct */
	job->user_data = op;

	return 0;
}

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
/**
 * Process a crypto operation containing a security op and complete a
 * JOB_AES_HMAC job structure for submission to the multi buffer library for
 * processing.
 */
static inline int
set_sec_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
		struct rte_crypto_op *op, uint8_t *digest_idx)
{
	struct rte_mbuf *m_src, *m_dst;
	struct rte_crypto_sym_op *sym;
	struct aesni_mb_session *session;

	session = get_session(qp, op);
	if (unlikely(session == NULL)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -1;
	}

	/* Only DOCSIS protocol operations are supported for now */
	if (session->cipher.mode != IMB_CIPHER_DOCSIS_SEC_BPI ||
			session->auth.algo != IMB_AUTH_DOCSIS_CRC32) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -1;
	}

	sym = op->sym;
	m_src = sym->m_src;

	if (likely(sym->m_dst == NULL || sym->m_dst == m_src)) {
		/* in-place operation */
		m_dst = m_src;
	} else {
		/* out-of-place operation not supported */
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -ENOTSUP;
	}

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
	job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
	job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;

	/* Set IV parameters */
	job->iv_len_in_bytes = session->iv.length;
	job->iv = (uint8_t *)op + session->iv.offset;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;

	/* Set digest output location */
	job->auth_tag_output = qp->temp_digests[*digest_idx];
	*digest_idx = (*digest_idx + 1) % MAX_JOBS;

	/* Set digest length */
	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;

	/* Set data parameters */
	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *,
						sym->cipher.data.offset);

	job->cipher_start_src_offset_in_bytes = sym->cipher.data.offset;
	job->msg_len_to_cipher_in_bytes = sym->cipher.data.length;

	job->hash_start_src_offset_in_bytes = sym->auth.data.offset;
	job->msg_len_to_hash_in_bytes = sym->auth.data.length;

	job->user_data = op;

	return 0;
}

static inline void
verify_docsis_sec_crc(JOB_AES_HMAC *job, uint8_t *status)
{
	uint16_t crc_offset;
	uint8_t *crc;

	if (!job->msg_len_to_hash_in_bytes)
		return;

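	/*
	 * The CRC sits at the end of the hashed region. job->dst points at
	 * the cipher start offset, so rebase the hash-relative end offset
	 * before comparing.
	 */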
1582 	crc_offset = job->hash_start_src_offset_in_bytes +
1583 			job->msg_len_to_hash_in_bytes -
1584 			job->cipher_start_src_offset_in_bytes;
1585 	crc = job->dst + crc_offset;
1586 
1587 	/* Verify CRC (at the end of the message) */
1588 	if (memcmp(job->auth_tag_output, crc, RTE_ETHER_CRC_LEN) != 0)
1589 		*status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1590 }
1591 #endif
1592 
1593 static inline void
verify_digest(JOB_AES_HMAC * job,void * digest,uint16_t len,uint8_t * status)1594 verify_digest(JOB_AES_HMAC *job, void *digest, uint16_t len, uint8_t *status)
1595 {
1596 	/* Verify digest if required */
1597 	if (memcmp(job->auth_tag_output, digest, len) != 0)
1598 		*status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1599 }
1600 
1601 static inline void
generate_digest(JOB_AES_HMAC * job,struct rte_crypto_op * op,struct aesni_mb_session * sess)1602 generate_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
1603 		struct aesni_mb_session *sess)
1604 {
1605 	/* No extra copy needed */
1606 	if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len))
1607 		return;
1608 
1609 	/*
1610 	 * This can only happen for HMAC, so only digest
1611 	 * for authentication algos is required
1612 	 */
1613 	memcpy(op->sym->auth.digest.data, job->auth_tag_output,
1614 			sess->auth.req_digest_len);
1615 }
1616 
1617 /**
1618  * Process a completed job and return rte_mbuf which job processed
1619  *
1620  * @param qp		Queue Pair to process
1621  * @param job	JOB_AES_HMAC job to process
1622  *
1623  * @return
1624  * - Returns processed crypto operation.
1625  * - Returns NULL on invalid job
1626  */
static inline struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
	struct aesni_mb_session *sess = NULL;

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
	uint8_t is_docsis_sec = 0;

	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		/*
		 * Assume at this point that any security-type op is a
		 * DOCSIS op
		 */
		is_docsis_sec = 1;
		sess = get_sec_session_private_data(op->sym->sec_session);
	} else
#endif
	{
		sess = get_sym_session_private_data(op->sym->session,
						cryptodev_driver_id);
	}

	if (unlikely(sess == NULL)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return op;
	}

	if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
		switch (job->status) {
		case STS_COMPLETED:
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

			if (job->hash_alg == NULL_HASH)
				break;

			if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
				if (is_aead_algo(job->hash_alg, sess->cipher.mode))
					verify_digest(job,
						op->sym->aead.digest.data,
						sess->auth.req_digest_len,
						&op->status);
#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
				else if (is_docsis_sec)
					verify_docsis_sec_crc(job,
						&op->status);
#endif
				else
					verify_digest(job,
						op->sym->auth.digest.data,
						sess->auth.req_digest_len,
						&op->status);
			} else
				generate_digest(job, op, sess);
			break;
		default:
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
	}

	/* Free session if a session-less crypto op */
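	/*
	 * Zero the private session data before both objects go back to
	 * their mempools so expanded keys do not outlive the operation.
	 */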
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		memset(sess, 0, sizeof(struct aesni_mb_session));
		memset(op->sym->session, 0,
			rte_cryptodev_sym_get_existing_header_session_size(
				op->sym->session));
		rte_mempool_put(qp->sess_mp_priv, sess);
		rte_mempool_put(qp->sess_mp, op->sym->session);
		op->sym->session = NULL;
	}

	return op;
}

static inline void
post_process_mb_sync_job(JOB_AES_HMAC *job)
{
	uint32_t *st;

	st = job->user_data;
	st[0] = (job->status == STS_COMPLETED) ? 0 : EBADMSG;
}

/**
 * Process a completed JOB_AES_HMAC job and keep processing jobs until
 * get_completed_job returns NULL
 *
 * @param qp		Queue Pair to process
 * @param job		JOB_AES_HMAC job
 *
 * @return
 * - Number of processed jobs
 */
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *op = NULL;
	unsigned processed_jobs = 0;

	while (job != NULL) {
		op = post_process_mb_job(qp, job);

		if (op) {
			ops[processed_jobs++] = op;
			qp->stats.dequeued_count++;
		} else {
			qp->stats.dequeue_err_count++;
			break;
		}
		if (processed_jobs == nb_ops)
			break;

		job = IMB_GET_COMPLETED_JOB(qp->mb_mgr);
	}

	return processed_jobs;
}

static inline uint32_t
handle_completed_sync_jobs(JOB_AES_HMAC *job, MB_MGR *mb_mgr)
{
	uint32_t i;

	for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr))
		post_process_mb_sync_job(job);

	return i;
}

static inline uint32_t
flush_mb_sync_mgr(MB_MGR *mb_mgr)
{
	JOB_AES_HMAC *job;

	job = IMB_FLUSH_JOB(mb_mgr);
	return handle_completed_sync_jobs(job, mb_mgr);
}

static inline uint16_t
flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	int processed_ops = 0;

	/* Flush the remaining jobs */
	JOB_AES_HMAC *job = IMB_FLUSH_JOB(qp->mb_mgr);

	if (job)
		processed_ops += handle_completed_jobs(qp, job,
				&ops[processed_ops], nb_ops - processed_ops);

	return processed_ops;
}

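/*
 * When job parameters could not be set, turn the job into a NULL cipher/
 * NULL hash pass-through so it can still be submitted and the failed
 * operation flows back through the normal completion path.
 */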
static inline JOB_AES_HMAC *
set_job_null_op(JOB_AES_HMAC *job, struct rte_crypto_op *op)
{
	job->chain_order = HASH_CIPHER;
	job->cipher_mode = NULL_CIPHER;
	job->hash_alg = NULL_HASH;
	job->cipher_direction = DECRYPT;

	/* Set user data to be crypto operation data struct */
	job->user_data = op;

	return job;
}

static uint16_t
aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct aesni_mb_qp *qp = queue_pair;

	struct rte_crypto_op *op;
	JOB_AES_HMAC *job;

	int retval, processed_jobs = 0;

	if (unlikely(nb_ops == 0))
		return 0;

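	/*
	 * Work on a local copy of the temp-digest index and write it back
	 * to the queue pair only once the whole burst has been submitted.
	 */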
	uint8_t digest_idx = qp->digest_idx;
	do {
		/* Get next free mb job struct from mb manager */
		job = IMB_GET_NEXT_JOB(qp->mb_mgr);
		if (unlikely(job == NULL)) {
			/* if no free mb job structs we need to flush mb_mgr */
			processed_jobs += flush_mb_mgr(qp,
					&ops[processed_jobs],
					nb_ops - processed_jobs);

			if (nb_ops == processed_jobs)
				break;

			job = IMB_GET_NEXT_JOB(qp->mb_mgr);
		}

		/*
		 * Get the next operation to process from the ingress queue.
		 * There is no need to return the job to the MB_MGR if there
		 * are no more operations to process, since the MB_MGR can
		 * hand out that same pointer again on subsequent get_next
		 * calls.
		 */
		retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
		if (retval < 0)
			break;

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
			retval = set_sec_mb_job_params(job, qp, op,
						&digest_idx);
		else
#endif
			retval = set_mb_job_params(job, qp, op, &digest_idx);

		if (unlikely(retval != 0)) {
			qp->stats.dequeue_err_count++;
			set_job_null_op(job, op);
		}

		/* Submit job to multi-buffer for processing */
#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
		job = IMB_SUBMIT_JOB(qp->mb_mgr);
#else
		job = IMB_SUBMIT_JOB_NOCHECK(qp->mb_mgr);
#endif
		/*
		 * If submit returns a processed job then handle it,
		 * before submitting subsequent jobs
		 */
		if (job)
			processed_jobs += handle_completed_jobs(qp, job,
					&ops[processed_jobs],
					nb_ops - processed_jobs);

	} while (processed_jobs < nb_ops);

	qp->digest_idx = digest_idx;

	if (processed_jobs < 1)
		processed_jobs += flush_mb_mgr(qp,
				&ops[processed_jobs],
				nb_ops - processed_jobs);

	return processed_jobs;
}

static MB_MGR *
alloc_init_mb_mgr(enum aesni_mb_vector_mode vector_mode)
{
	MB_MGR *mb_mgr = alloc_mb_mgr(0);
	if (mb_mgr == NULL)
		return NULL;

	switch (vector_mode) {
	case RTE_AESNI_MB_SSE:
		init_mb_mgr_sse(mb_mgr);
		break;
	case RTE_AESNI_MB_AVX:
		init_mb_mgr_avx(mb_mgr);
		break;
	case RTE_AESNI_MB_AVX2:
		init_mb_mgr_avx2(mb_mgr);
		break;
	case RTE_AESNI_MB_AVX512:
		init_mb_mgr_avx512(mb_mgr);
		break;
	default:
		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
		free_mb_mgr(mb_mgr);
		return NULL;
	}

	return mb_mgr;
}

static inline void
aesni_mb_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t err)
{
	uint32_t i;

	for (i = 0; i != vec->num; ++i)
		vec->status[i] = err;
}

static inline int
check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
{
	/* no multi-seg support with current AESNI-MB PMD */
	if (sgl->num != 1)
		return ENOTSUP;
	else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl->vec[0].len)
		return EINVAL;
	return 0;
}

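/*
 * Debug builds submit through IMB_SUBMIT_JOB, which validates the job
 * parameters; release builds use the NOCHECK variant, which skips that
 * validation for performance.
 */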
static inline JOB_AES_HMAC *
submit_sync_job(MB_MGR *mb_mgr)
{
#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
	return IMB_SUBMIT_JOB(mb_mgr);
#else
	return IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
#endif
}

static inline uint32_t
generate_sync_dgst(struct rte_crypto_sym_vec *vec,
	const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
{
	uint32_t i, k;

	for (i = 0, k = 0; i != vec->num; i++) {
		if (vec->status[i] == 0) {
			memcpy(vec->digest[i].va, dgst[i], len);
			k++;
		}
	}

	return k;
}

static inline uint32_t
verify_sync_dgst(struct rte_crypto_sym_vec *vec,
	const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
{
	uint32_t i, k;

	for (i = 0, k = 0; i != vec->num; i++) {
		if (vec->status[i] == 0) {
			if (memcmp(vec->digest[i].va, dgst[i], len) != 0)
				vec->status[i] = EBADMSG;
			else
				k++;
		}
	}

	return k;
}

uint32_t
aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
	struct rte_crypto_sym_vec *vec)
{
	int32_t ret;
	uint32_t i, j, k, len;
	void *buf;
	JOB_AES_HMAC *job;
	MB_MGR *mb_mgr;
	struct aesni_mb_private *priv;
	struct aesni_mb_session *s;
	uint8_t tmp_dgst[vec->num][DIGEST_LENGTH_MAX];

	s = get_sym_session_private_data(sess, dev->driver_id);
	if (s == NULL) {
		aesni_mb_fill_error_code(vec, EINVAL);
		return 0;
	}

	/* get per-thread MB MGR, create one if needed */
	mb_mgr = RTE_PER_LCORE(sync_mb_mgr);
	if (mb_mgr == NULL) {
		priv = dev->data->dev_private;
		mb_mgr = alloc_init_mb_mgr(priv->vector_mode);
		if (mb_mgr == NULL) {
			aesni_mb_fill_error_code(vec, ENOMEM);
			return 0;
		}
		RTE_PER_LCORE(sync_mb_mgr) = mb_mgr;
	}

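	/*
	 * i walks every input vector, j counts the jobs actually submitted
	 * and k the jobs completed so far; the flush loop below spins until
	 * every submitted job has completed.
	 */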
	for (i = 0, j = 0, k = 0; i != vec->num; i++) {

		ret = check_crypto_sgl(sofs, vec->sgl + i);
		if (ret != 0) {
			vec->status[i] = ret;
			continue;
		}

		buf = vec->sgl[i].vec[0].base;
		len = vec->sgl[i].vec[0].len;

		job = IMB_GET_NEXT_JOB(mb_mgr);
		if (job == NULL) {
			k += flush_mb_sync_mgr(mb_mgr);
			job = IMB_GET_NEXT_JOB(mb_mgr);
			RTE_ASSERT(job != NULL);
		}

		/* Submit job for processing */
		set_cpu_mb_job_params(job, s, sofs, buf, len, &vec->iv[i],
			&vec->aad[i], tmp_dgst[i], &vec->status[i]);
		job = submit_sync_job(mb_mgr);
		j++;

		/* handle completed jobs */
		k += handle_completed_sync_jobs(job, mb_mgr);
	}

	/* flush remaining jobs */
	while (k != j)
		k += flush_mb_sync_mgr(mb_mgr);

	/* finish processing for successful jobs: check/update digest */
	if (k != 0) {
		if (s->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
			k = verify_sync_dgst(vec,
				(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
				s->auth.req_digest_len);
		else
			k = generate_sync_dgst(vec,
				(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
				s->auth.req_digest_len);
	}

	return k;
}

static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);

static uint64_t
vec_mode_to_flags(enum aesni_mb_vector_mode mode)
{
	switch (mode) {
	case RTE_AESNI_MB_SSE:
		return RTE_CRYPTODEV_FF_CPU_SSE;
	case RTE_AESNI_MB_AVX:
		return RTE_CRYPTODEV_FF_CPU_AVX;
	case RTE_AESNI_MB_AVX2:
		return RTE_CRYPTODEV_FF_CPU_AVX2;
	case RTE_AESNI_MB_AVX512:
		return RTE_CRYPTODEV_FF_CPU_AVX512;
	default:
		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", mode);
		return 0;
	}
}

static int
cryptodev_aesni_mb_create(const char *name,
			struct rte_vdev_device *vdev,
			struct rte_cryptodev_pmd_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct aesni_mb_private *internals;
	enum aesni_mb_vector_mode vector_mode;
	MB_MGR *mb_mgr;

	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
	if (dev == NULL) {
		AESNI_MB_LOG(ERR, "failed to create cryptodev vdev");
		return -ENODEV;
	}

	/* Check CPU for supported vector instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
		vector_mode = RTE_AESNI_MB_AVX512;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		vector_mode = RTE_AESNI_MB_AVX2;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
		vector_mode = RTE_AESNI_MB_AVX;
	else
		vector_mode = RTE_AESNI_MB_SSE;

	dev->driver_id = cryptodev_driver_id;
	dev->dev_ops = rte_aesni_mb_pmd_ops;

	/* register rx/tx burst functions for data path */
	dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
	dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
			RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
			RTE_CRYPTODEV_FF_SYM_SESSIONLESS;

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
	struct rte_security_ctx *security_instance;
	security_instance = rte_malloc("aesni_mb_sec",
				sizeof(struct rte_security_ctx),
				RTE_CACHE_LINE_SIZE);
	if (security_instance == NULL) {
		AESNI_MB_LOG(ERR, "rte_security_ctx memory alloc failed");
		rte_cryptodev_pmd_destroy(dev);
		return -ENOMEM;
	}

	security_instance->device = (void *)dev;
	security_instance->ops = rte_aesni_mb_pmd_sec_ops;
	security_instance->sess_cnt = 0;
	dev->security_ctx = security_instance;
	dev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
#endif

	/* Check CPU for support for AES instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AESNI;
	else
		AESNI_MB_LOG(WARNING, "AES instructions not supported by CPU");

	dev->feature_flags |= vec_mode_to_flags(vector_mode);

	mb_mgr = alloc_init_mb_mgr(vector_mode);
	if (mb_mgr == NULL) {
#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
		rte_free(dev->security_ctx);
		dev->security_ctx = NULL;
#endif
		rte_cryptodev_pmd_destroy(dev);
		return -ENOMEM;
	}

	/* Set vector instructions mode supported */
	internals = dev->data->dev_private;

	internals->vector_mode = vector_mode;
	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
	internals->mb_mgr = mb_mgr;

	AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
			imb_get_version_str());
	return 0;
}

static int
cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev_pmd_init_params init_params = {
		"",
		sizeof(struct aesni_mb_private),
		rte_socket_id(),
		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
	};
	const char *name, *args;
	int retval;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	args = rte_vdev_device_args(vdev);

	retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
	if (retval) {
		AESNI_MB_LOG(ERR, "Failed to parse initialisation arguments[%s]",
				args);
		return -EINVAL;
	}

	return cryptodev_aesni_mb_create(name, vdev, &init_params);
}

static int
cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev *cryptodev;
	struct aesni_mb_private *internals;
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
	if (cryptodev == NULL)
		return -ENODEV;

	internals = cryptodev->data->dev_private;

	free_mb_mgr(internals->mb_mgr);
	if (RTE_PER_LCORE(sync_mb_mgr)) {
		free_mb_mgr(RTE_PER_LCORE(sync_mb_mgr));
		RTE_PER_LCORE(sync_mb_mgr) = NULL;
	}

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
	rte_free(cryptodev->security_ctx);
	cryptodev->security_ctx = NULL;
#endif

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
	.probe = cryptodev_aesni_mb_probe,
	.remove = cryptodev_aesni_mb_remove
};

static struct cryptodev_driver aesni_mb_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
	"max_nb_queue_pairs=<int> "
	"socket_id=<int>");
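/*
 * Illustrative invocation (assumes the canonical vdev name
 * "crypto_aesni_mb"; the application name is a placeholder):
 *   ./dpdk-app --vdev "crypto_aesni_mb,max_nb_queue_pairs=8,socket_id=0"
 */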
RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
		cryptodev_aesni_mb_pmd_drv.driver,
		cryptodev_driver_id);
RTE_LOG_REGISTER(aesni_mb_logtype_driver, pmd.crypto.aesni_mb, NOTICE);