xref: /dpdk/app/test-crypto-perf/cperf_ops.c (revision 29fd052d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4 
5 #include <rte_cryptodev.h>
6 #include <rte_ether.h>
7 #include <rte_ip.h>
8 
9 #include "cperf_ops.h"
10 #include "cperf_test_vectors.h"
11 
12 static int
13 cperf_set_ops_asym(struct rte_crypto_op **ops,
14 		   uint32_t src_buf_offset __rte_unused,
15 		   uint32_t dst_buf_offset __rte_unused, uint16_t nb_ops,
16 		   struct rte_cryptodev_sym_session *sess,
17 		   const struct cperf_options *options __rte_unused,
18 		   const struct cperf_test_vector *test_vector __rte_unused,
19 		   uint16_t iv_offset __rte_unused,
20 		   uint32_t *imix_idx __rte_unused,
21 		   uint64_t *tsc_start __rte_unused)
22 {
23 	uint16_t i;
24 	void *asym_sess = (void *)sess;
25 
26 	for (i = 0; i < nb_ops; i++) {
27 		struct rte_crypto_asym_op *asym_op = ops[i]->asym;
28 
29 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
30 		asym_op->modex.base.data = perf_base;
31 		asym_op->modex.base.length = sizeof(perf_base);
32 		asym_op->modex.result.data = perf_mod_result;
33 		asym_op->modex.result.length = sizeof(perf_mod_result);
34 		rte_crypto_op_attach_asym_session(ops[i], asym_sess);
35 	}
36 	return 0;
37 }
38 
39 #ifdef RTE_LIB_SECURITY
40 static void
41 test_ipsec_vec_populate(struct rte_mbuf *m, const struct cperf_options *options,
42 			const struct cperf_test_vector *test_vector)
43 {
44 	struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);
45 
46 	if ((options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ||
47 		(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
48 		memcpy(ip, test_vector->plaintext.data,
49 		       sizeof(struct rte_ipv4_hdr));
50 
51 		ip->total_length = rte_cpu_to_be_16(m->data_len);
52 	}
53 }
54 
55 static int
56 cperf_set_ops_security(struct rte_crypto_op **ops,
57 		uint32_t src_buf_offset __rte_unused,
58 		uint32_t dst_buf_offset __rte_unused,
59 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
60 		const struct cperf_options *options,
61 		const struct cperf_test_vector *test_vector,
62 		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
63 		uint64_t *tsc_start)
64 {
65 	uint16_t i;
66 
67 	for (i = 0; i < nb_ops; i++) {
68 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
69 		struct rte_security_session *sec_sess =
70 			(struct rte_security_session *)sess;
71 		uint32_t buf_sz;
72 
73 		uint32_t *per_pkt_hfn = rte_crypto_op_ctod_offset(ops[i],
74 					uint32_t *, iv_offset);
75 		*per_pkt_hfn = options->pdcp_ses_hfn_en ? 0 : PDCP_DEFAULT_HFN;
76 
77 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
78 		rte_security_attach_session(ops[i], sec_sess);
79 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
80 							src_buf_offset);
81 
82 		if (options->op_type == CPERF_PDCP) {
83 			sym_op->m_src->buf_len = options->segment_sz;
84 			sym_op->m_src->data_len = options->test_buffer_size;
85 			sym_op->m_src->pkt_len = sym_op->m_src->data_len;
86 		}
87 
88 		if (options->op_type == CPERF_DOCSIS) {
89 			if (options->imix_distribution_count) {
90 				buf_sz = options->imix_buffer_sizes[*imix_idx];
91 				*imix_idx = (*imix_idx + 1) % options->pool_sz;
92 			} else
93 				buf_sz = options->test_buffer_size;
94 
95 			sym_op->m_src->buf_len = options->segment_sz;
96 			sym_op->m_src->data_len = buf_sz;
97 			sym_op->m_src->pkt_len = buf_sz;
98 
99 			/* DOCSIS header is not CRC'ed */
100 			sym_op->auth.data.offset = options->docsis_hdr_sz;
101 			sym_op->auth.data.length = buf_sz -
102 				sym_op->auth.data.offset - RTE_ETHER_CRC_LEN;
103 			/*
104 			 * DOCSIS header and SRC and DST MAC addresses are not
105 			 * ciphered
106 			 */
107 			sym_op->cipher.data.offset = sym_op->auth.data.offset +
108 				RTE_ETHER_HDR_LEN - RTE_ETHER_TYPE_LEN;
109 			sym_op->cipher.data.length = buf_sz -
110 				sym_op->cipher.data.offset;
111 		}
112 
113 		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
114 		if (dst_buf_offset == 0)
115 			sym_op->m_dst = NULL;
116 		else
117 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
118 							dst_buf_offset);
119 	}
120 
121 	RTE_SET_USED(tsc_start);
122 	RTE_SET_USED(test_vector);
123 
124 	return 0;
125 }
126 
/*
 * Populate ops for lookaside-protocol IPsec tests: attach the security
 * session, set the mbuf geometry, and (for throughput runs) time the cost
 * of regenerating the plaintext IP headers so it can be subtracted from
 * the measured cycles via *tsc_start.
 *
 * NOTE(review): src_buf_offset is marked __rte_unused but IS read below
 * when reassigning sym_op->m_src — the attribute looks stale; confirm.
 */
static int
cperf_set_ops_security_ipsec(struct rte_crypto_op **ops,
		uint32_t src_buf_offset __rte_unused,
		uint32_t dst_buf_offset __rte_unused,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
		uint64_t *tsc_start)
{
	struct rte_security_session *sec_sess =
			(struct rte_security_session *)sess;
	const uint32_t test_buffer_size = options->test_buffer_size;
	const uint32_t headroom_sz = options->headroom_sz;
	const uint32_t segment_sz = options->segment_sz;
	uint64_t tsc_start_temp, tsc_end_temp;
	uint16_t i = 0;

	RTE_SET_USED(imix_idx);

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
		/*
		 * NOTE(review): `m` is captured BEFORE sym_op->m_src is
		 * reassigned two statements below; the geometry fix-ups
		 * therefore touch the previous m_src, not the one computed
		 * from src_buf_offset. Presumably both point at the same
		 * embedded mbuf when ops are reused — verify against the
		 * op-pool layout.
		 */
		struct rte_mbuf *m = sym_op->m_src;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_security_attach_session(ops[i], sec_sess);
		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* In case of IPsec, headroom is consumed by PMD,
		 * hence resetting it.
		 */
		m->data_off = headroom_sz;

		m->buf_len = segment_sz;
		m->data_len = test_buffer_size;
		m->pkt_len = test_buffer_size;

		/* IPsec ops are always processed in-place here */
		sym_op->m_dst = NULL;
	}

	/* Verify runs (test_file set) keep the vector data untouched */
	if (options->test_file != NULL)
		return 0;

	/* Time the header-population pass so the caller can exclude it */
	tsc_start_temp = rte_rdtsc_precise();

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
		struct rte_mbuf *m = sym_op->m_src;

		test_ipsec_vec_populate(m, options, test_vector);
	}

	tsc_end_temp = rte_rdtsc_precise();
	*tsc_start += tsc_end_temp - tsc_start_temp;

	return 0;
}
185 
186 #endif
187 
188 static int
189 cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
190 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
191 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
192 		const struct cperf_options *options,
193 		const struct cperf_test_vector *test_vector __rte_unused,
194 		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
195 		uint64_t *tsc_start __rte_unused)
196 {
197 	uint16_t i;
198 
199 	for (i = 0; i < nb_ops; i++) {
200 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
201 
202 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
203 		rte_crypto_op_attach_sym_session(ops[i], sess);
204 
205 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
206 							src_buf_offset);
207 
208 		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
209 		if (dst_buf_offset == 0)
210 			sym_op->m_dst = NULL;
211 		else
212 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
213 							dst_buf_offset);
214 
215 		/* cipher parameters */
216 		if (options->imix_distribution_count) {
217 			sym_op->cipher.data.length =
218 				options->imix_buffer_sizes[*imix_idx];
219 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
220 		} else
221 			sym_op->cipher.data.length = options->test_buffer_size;
222 		sym_op->cipher.data.offset = 0;
223 	}
224 
225 	return 0;
226 }
227 
228 static int
229 cperf_set_ops_null_auth(struct rte_crypto_op **ops,
230 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
231 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
232 		const struct cperf_options *options,
233 		const struct cperf_test_vector *test_vector __rte_unused,
234 		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
235 		uint64_t *tsc_start __rte_unused)
236 {
237 	uint16_t i;
238 
239 	for (i = 0; i < nb_ops; i++) {
240 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
241 
242 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
243 		rte_crypto_op_attach_sym_session(ops[i], sess);
244 
245 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
246 							src_buf_offset);
247 
248 		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
249 		if (dst_buf_offset == 0)
250 			sym_op->m_dst = NULL;
251 		else
252 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
253 							dst_buf_offset);
254 
255 		/* auth parameters */
256 		if (options->imix_distribution_count) {
257 			sym_op->auth.data.length =
258 				options->imix_buffer_sizes[*imix_idx];
259 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
260 		} else
261 			sym_op->auth.data.length = options->test_buffer_size;
262 		sym_op->auth.data.offset = 0;
263 	}
264 
265 	return 0;
266 }
267 
268 static int
269 cperf_set_ops_cipher(struct rte_crypto_op **ops,
270 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
271 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
272 		const struct cperf_options *options,
273 		const struct cperf_test_vector *test_vector,
274 		uint16_t iv_offset, uint32_t *imix_idx,
275 		uint64_t *tsc_start __rte_unused)
276 {
277 	uint16_t i;
278 
279 	for (i = 0; i < nb_ops; i++) {
280 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
281 
282 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
283 		rte_crypto_op_attach_sym_session(ops[i], sess);
284 
285 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
286 							src_buf_offset);
287 
288 		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
289 		if (dst_buf_offset == 0)
290 			sym_op->m_dst = NULL;
291 		else
292 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
293 							dst_buf_offset);
294 
295 		/* cipher parameters */
296 		if (options->imix_distribution_count) {
297 			sym_op->cipher.data.length =
298 				options->imix_buffer_sizes[*imix_idx];
299 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
300 		} else
301 			sym_op->cipher.data.length = options->test_buffer_size;
302 
303 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
304 				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
305 				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
306 			sym_op->cipher.data.length <<= 3;
307 
308 		sym_op->cipher.data.offset = 0;
309 	}
310 
311 	if (options->test == CPERF_TEST_TYPE_VERIFY) {
312 		for (i = 0; i < nb_ops; i++) {
313 			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
314 					uint8_t *, iv_offset);
315 
316 			memcpy(iv_ptr, test_vector->cipher_iv.data,
317 					test_vector->cipher_iv.length);
318 
319 		}
320 	}
321 
322 	return 0;
323 }
324 
325 static int
326 cperf_set_ops_auth(struct rte_crypto_op **ops,
327 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
328 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
329 		const struct cperf_options *options,
330 		const struct cperf_test_vector *test_vector,
331 		uint16_t iv_offset, uint32_t *imix_idx,
332 		uint64_t *tsc_start __rte_unused)
333 {
334 	uint16_t i;
335 
336 	for (i = 0; i < nb_ops; i++) {
337 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
338 
339 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
340 		rte_crypto_op_attach_sym_session(ops[i], sess);
341 
342 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
343 							src_buf_offset);
344 
345 		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
346 		if (dst_buf_offset == 0)
347 			sym_op->m_dst = NULL;
348 		else
349 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
350 							dst_buf_offset);
351 
352 		if (test_vector->auth_iv.length) {
353 			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
354 								uint8_t *,
355 								iv_offset);
356 			memcpy(iv_ptr, test_vector->auth_iv.data,
357 					test_vector->auth_iv.length);
358 		}
359 
360 		/* authentication parameters */
361 		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
362 			sym_op->auth.digest.data = test_vector->digest.data;
363 			sym_op->auth.digest.phys_addr =
364 					test_vector->digest.phys_addr;
365 		} else {
366 
367 			uint32_t offset = options->test_buffer_size;
368 			struct rte_mbuf *buf, *tbuf;
369 
370 			if (options->out_of_place) {
371 				buf = sym_op->m_dst;
372 			} else {
373 				tbuf = sym_op->m_src;
374 				while ((tbuf->next != NULL) &&
375 						(offset >= tbuf->data_len)) {
376 					offset -= tbuf->data_len;
377 					tbuf = tbuf->next;
378 				}
379 				/*
380 				 * If there is not enough room in segment,
381 				 * place the digest in the next segment
382 				 */
383 				if ((tbuf->data_len - offset) < options->digest_sz) {
384 					tbuf = tbuf->next;
385 					offset = 0;
386 				}
387 				buf = tbuf;
388 			}
389 
390 			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
391 					uint8_t *, offset);
392 			sym_op->auth.digest.phys_addr =
393 					rte_pktmbuf_iova_offset(buf, offset);
394 
395 		}
396 
397 		if (options->imix_distribution_count) {
398 			sym_op->auth.data.length =
399 				options->imix_buffer_sizes[*imix_idx];
400 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
401 		} else
402 			sym_op->auth.data.length = options->test_buffer_size;
403 
404 		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
405 				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
406 				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
407 			sym_op->auth.data.length <<= 3;
408 
409 		sym_op->auth.data.offset = 0;
410 	}
411 
412 	if (options->test == CPERF_TEST_TYPE_VERIFY) {
413 		if (test_vector->auth_iv.length) {
414 			for (i = 0; i < nb_ops; i++) {
415 				uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
416 						uint8_t *, iv_offset);
417 
418 				memcpy(iv_ptr, test_vector->auth_iv.data,
419 						test_vector->auth_iv.length);
420 			}
421 		}
422 	}
423 	return 0;
424 }
425 
/*
 * Populate ops for chained cipher+auth tests: attach the session, wire up
 * the mbufs embedded in each op, set cipher and auth lengths/offsets
 * (fixed or IMIX-distributed), and place the digest. In VERIFY mode the
 * reference cipher IV (and auth IV, when present) is copied into each op.
 */
static int
cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		/* src/dst mbufs live at fixed offsets inside the op */
		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* dst_buf_offset == 0 means no separate destination buffer */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* cipher parameters */
		if (options->imix_distribution_count) {
			sym_op->cipher.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->cipher.data.length = options->test_buffer_size;

		/* These algorithms take the data length in bits */
		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
			sym_op->cipher.data.length <<= 3;

		sym_op->cipher.data.offset = 0;

		/* authentication parameters */
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			/* Verify against the vector's reference digest */
			sym_op->auth.digest.data = test_vector->digest.data;
			sym_op->auth.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {
			/* Place the generated digest right after the payload,
			 * walking the chain for multi-segment mbufs.
			 */
			uint32_t offset = options->test_buffer_size;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in segment,
				 * place the digest in the next segment
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->auth.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);
		}

		if (options->imix_distribution_count) {
			sym_op->auth.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->auth.data.length = options->test_buffer_size;

		/* These algorithms take the data length in bits */
		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
			sym_op->auth.data.length <<= 3;

		sym_op->auth.data.offset = 0;
	}

	/* VERIFY runs against known vectors, so load the reference IV(s) */
	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		for (i = 0; i < nb_ops; i++) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, iv_offset);

			memcpy(iv_ptr, test_vector->cipher_iv.data,
					test_vector->cipher_iv.length);
			if (test_vector->auth_iv.length) {
				/*
				 * Copy IV after the crypto operation and
				 * the cipher IV
				 */
				iv_ptr += test_vector->cipher_iv.length;
				memcpy(iv_ptr, test_vector->auth_iv.data,
						test_vector->auth_iv.length);
			}
		}

	}

	return 0;
}
541 
/*
 * Populate ops for AEAD tests: attach the session, wire up mbufs, set the
 * AEAD data length/offset, point aad at the per-op AAD area (placed right
 * after the 16-byte-aligned IV), and place the digest. VERIFY mode copies
 * the reference IV and AAD, with AES-CCM's special layout handled.
 */
static int
cperf_set_ops_aead(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;
	/* AAD is placed after the IV */
	uint16_t aad_offset = iv_offset +
			RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16);

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		/* src/dst mbufs live at fixed offsets inside the op */
		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* dst_buf_offset == 0 means no separate destination buffer */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* AEAD parameters */
		if (options->imix_distribution_count) {
			sym_op->aead.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->aead.data.length = options->test_buffer_size;
		sym_op->aead.data.offset = 0;

		sym_op->aead.aad.data = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, aad_offset);
		sym_op->aead.aad.phys_addr = rte_crypto_op_ctophys_offset(ops[i],
					aad_offset);

		if (options->aead_op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
			/* Decrypt verifies against the reference digest */
			sym_op->aead.digest.data = test_vector->digest.data;
			sym_op->aead.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {
			/* Place the generated digest right after the AEAD
			 * data region, walking the chain for multi-segment
			 * mbufs.
			 */
			uint32_t offset = sym_op->aead.data.length +
						sym_op->aead.data.offset;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in segment,
				 * place the digest in the next segment
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->aead.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);
		}
	}

	/* VERIFY runs against known vectors, so load the reference IV/AAD */
	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		for (i = 0; i < nb_ops; i++) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, iv_offset);

			/*
			 * If doing AES-CCM, nonce is copied one byte
			 * after the start of IV field, and AAD is copied
			 * 18 bytes after the start of the AAD field.
			 */
			if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
				memcpy(iv_ptr + 1, test_vector->aead_iv.data,
					test_vector->aead_iv.length);

				memcpy(ops[i]->sym->aead.aad.data + 18,
					test_vector->aad.data,
					test_vector->aad.length);
			} else {
				memcpy(iv_ptr, test_vector->aead_iv.data,
					test_vector->aead_iv.length);

				memcpy(ops[i]->sym->aead.aad.data,
					test_vector->aad.data,
					test_vector->aad.length);
			}
		}
	}

	return 0;
}
653 
/*
 * Build a lookaside-protocol IPsec security session for the perf test.
 *
 * Chooses the crypto transform from the options: AEAD when aead_algo is
 * set, otherwise a cipher+auth chain; returns NULL if neither combination
 * is configured. The session direction (egress/ingress) is derived from
 * the configured encrypt/generate vs decrypt/verify operations.
 *
 * Returned pointer is actually a struct rte_security_session * cast to
 * the symmetric-session type used by the common cperf plumbing.
 */
static struct rte_cryptodev_sym_session *
create_ipsec_session(struct rte_mempool *sess_mp,
		struct rte_mempool *priv_mp,
		uint8_t dev_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset)
{
	struct rte_crypto_sym_xform xform = {0};
	struct rte_crypto_sym_xform auth_xform = {0};

	if (options->aead_algo != 0) {
		/* Setup AEAD Parameters */
		xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
		xform.next = NULL;
		xform.aead.algo = options->aead_algo;
		xform.aead.op = options->aead_op;
		xform.aead.iv.offset = iv_offset;
		xform.aead.key.data = test_vector->aead_key.data;
		xform.aead.key.length = test_vector->aead_key.length;
		xform.aead.iv.length = test_vector->aead_iv.length;
		xform.aead.digest_length = options->digest_sz;
		xform.aead.aad_length = options->aead_aad_sz;
	} else if (options->cipher_algo != 0 && options->auth_algo != 0) {
		/* Setup Cipher Parameters */
		xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		xform.next = NULL;
		xform.cipher.algo = options->cipher_algo;
		xform.cipher.op = options->cipher_op;
		xform.cipher.iv.offset = iv_offset;
		xform.cipher.iv.length = test_vector->cipher_iv.length;
		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			xform.cipher.key.data = test_vector->cipher_key.data;
			xform.cipher.key.length =
				test_vector->cipher_key.length;
		} else {
			xform.cipher.key.data = NULL;
			xform.cipher.key.length = 0;
		}

		/* Setup Auth Parameters */
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.next = NULL;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
		/* auth IV sits immediately after the cipher IV */
		auth_xform.auth.iv.offset = iv_offset +
				xform.cipher.iv.length;
		/* auth different than null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length = options->digest_sz;
			auth_xform.auth.key.length =
						test_vector->auth_key.length;
			auth_xform.auth.key.data = test_vector->auth_key.data;
			auth_xform.auth.iv.length = test_vector->auth_iv.length;
		} else {
			auth_xform.auth.digest_length = 0;
			auth_xform.auth.key.length = 0;
			auth_xform.auth.key.data = NULL;
			auth_xform.auth.iv.length = 0;
		}

		/* Chain: cipher then auth */
		xform.next = &auth_xform;
	} else {
		/* Neither AEAD nor cipher+auth configured */
		return NULL;
	}

#define CPERF_IPSEC_SRC_IP	0x01010101
#define CPERF_IPSEC_DST_IP	0x02020202
#define CPERF_IPSEC_SALT	0x0
#define CPERF_IPSEC_DEFTTL	64
	struct rte_security_ipsec_tunnel_param tunnel = {
		.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
		{.ipv4 = {
			.src_ip = { .s_addr = CPERF_IPSEC_SRC_IP},
			.dst_ip = { .s_addr = CPERF_IPSEC_DST_IP},
			.dscp = 0,
			.df = 0,
			.ttl = CPERF_IPSEC_DEFTTL,
		} },
	};
	struct rte_security_session_conf sess_conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		{.ipsec = {
			.spi = rte_lcore_id(),
			/**< For testing sake, lcore_id is taken as SPI so that
			 * for every core a different session is created.
			 */
			.salt = CPERF_IPSEC_SALT,
			.options = { 0 },
			.replay_win_sz = 0,
			.direction =
				((options->cipher_op ==
					RTE_CRYPTO_CIPHER_OP_ENCRYPT) &&
				(options->auth_op ==
					RTE_CRYPTO_AUTH_OP_GENERATE)) ||
				(options->aead_op ==
					RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS :
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.tunnel = tunnel,
		} },
		.userdata = NULL,
		.crypto_xform = &xform
	};

	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
				rte_cryptodev_get_sec_ctx(dev_id);

	/* Create security session */
	return (void *)rte_security_session_create(ctx,
				&sess_conf, sess_mp, priv_mp);
}
770 
771 static struct rte_cryptodev_sym_session *
772 cperf_create_session(struct rte_mempool *sess_mp,
773 	struct rte_mempool *priv_mp,
774 	uint8_t dev_id,
775 	const struct cperf_options *options,
776 	const struct cperf_test_vector *test_vector,
777 	uint16_t iv_offset)
778 {
779 	struct rte_crypto_sym_xform cipher_xform;
780 	struct rte_crypto_sym_xform auth_xform;
781 	struct rte_crypto_sym_xform aead_xform;
782 	struct rte_cryptodev_sym_session *sess = NULL;
783 	void *asym_sess = NULL;
784 	struct rte_crypto_asym_xform xform = {0};
785 	int ret;
786 
787 	if (options->op_type == CPERF_ASYM_MODEX) {
788 		xform.next = NULL;
789 		xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
790 		xform.modex.modulus.data = perf_mod_p;
791 		xform.modex.modulus.length = sizeof(perf_mod_p);
792 		xform.modex.exponent.data = perf_mod_e;
793 		xform.modex.exponent.length = sizeof(perf_mod_e);
794 
795 		ret = rte_cryptodev_asym_session_create(dev_id, &xform,
796 				sess_mp, &asym_sess);
797 		if (ret < 0) {
798 			RTE_LOG(ERR, USER1, "Asym session create failed\n");
799 			return NULL;
800 		}
801 		return asym_sess;
802 	}
803 #ifdef RTE_LIB_SECURITY
804 	/*
805 	 * security only
806 	 */
807 	if (options->op_type == CPERF_PDCP) {
808 		/* Setup Cipher Parameters */
809 		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
810 		cipher_xform.next = NULL;
811 		cipher_xform.cipher.algo = options->cipher_algo;
812 		cipher_xform.cipher.op = options->cipher_op;
813 		cipher_xform.cipher.iv.offset = iv_offset;
814 		cipher_xform.cipher.iv.length = 4;
815 
816 		/* cipher different than null */
817 		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
818 			cipher_xform.cipher.key.data = test_vector->cipher_key.data;
819 			cipher_xform.cipher.key.length = test_vector->cipher_key.length;
820 		} else {
821 			cipher_xform.cipher.key.data = NULL;
822 			cipher_xform.cipher.key.length = 0;
823 		}
824 
825 		/* Setup Auth Parameters */
826 		if (options->auth_algo != 0) {
827 			auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
828 			auth_xform.next = NULL;
829 			auth_xform.auth.algo = options->auth_algo;
830 			auth_xform.auth.op = options->auth_op;
831 			auth_xform.auth.iv.offset = iv_offset +
832 				cipher_xform.cipher.iv.length;
833 
834 			/* auth different than null */
835 			if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
836 				auth_xform.auth.digest_length = options->digest_sz;
837 				auth_xform.auth.key.length = test_vector->auth_key.length;
838 				auth_xform.auth.key.data = test_vector->auth_key.data;
839 				auth_xform.auth.iv.length = test_vector->auth_iv.length;
840 			} else {
841 				auth_xform.auth.digest_length = 0;
842 				auth_xform.auth.key.length = 0;
843 				auth_xform.auth.key.data = NULL;
844 				auth_xform.auth.iv.length = 0;
845 			}
846 
847 			cipher_xform.next = &auth_xform;
848 		} else {
849 			cipher_xform.next = NULL;
850 		}
851 
852 		struct rte_security_session_conf sess_conf = {
853 			.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
854 			.protocol = RTE_SECURITY_PROTOCOL_PDCP,
855 			{.pdcp = {
856 				.bearer = 0x16,
857 				.domain = options->pdcp_domain,
858 				.pkt_dir = 0,
859 				.sn_size = options->pdcp_sn_sz,
860 				.hfn = options->pdcp_ses_hfn_en ?
861 					PDCP_DEFAULT_HFN : 0,
862 				.hfn_threshold = 0x70C0A,
863 				.hfn_ovrd = !(options->pdcp_ses_hfn_en),
864 			} },
865 			.crypto_xform = &cipher_xform
866 		};
867 
868 		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
869 					rte_cryptodev_get_sec_ctx(dev_id);
870 
871 		/* Create security session */
872 		return (void *)rte_security_session_create(ctx,
873 					&sess_conf, sess_mp, priv_mp);
874 	}
875 
876 	if (options->op_type == CPERF_IPSEC) {
877 		return create_ipsec_session(sess_mp, priv_mp, dev_id,
878 				options, test_vector, iv_offset);
879 	}
880 
881 	if (options->op_type == CPERF_DOCSIS) {
882 		enum rte_security_docsis_direction direction;
883 
884 		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
885 		cipher_xform.next = NULL;
886 		cipher_xform.cipher.algo = options->cipher_algo;
887 		cipher_xform.cipher.op = options->cipher_op;
888 		cipher_xform.cipher.iv.offset = iv_offset;
889 		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
890 			cipher_xform.cipher.key.data =
891 				test_vector->cipher_key.data;
892 			cipher_xform.cipher.key.length =
893 				test_vector->cipher_key.length;
894 			cipher_xform.cipher.iv.length =
895 				test_vector->cipher_iv.length;
896 		} else {
897 			cipher_xform.cipher.key.data = NULL;
898 			cipher_xform.cipher.key.length = 0;
899 			cipher_xform.cipher.iv.length = 0;
900 		}
901 		cipher_xform.next = NULL;
902 
903 		if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
904 			direction = RTE_SECURITY_DOCSIS_DOWNLINK;
905 		else
906 			direction = RTE_SECURITY_DOCSIS_UPLINK;
907 
908 		struct rte_security_session_conf sess_conf = {
909 			.action_type =
910 				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
911 			.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,
912 			{.docsis = {
913 				.direction = direction,
914 			} },
915 			.crypto_xform = &cipher_xform
916 		};
917 		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
918 					rte_cryptodev_get_sec_ctx(dev_id);
919 
920 		/* Create security session */
921 		return (void *)rte_security_session_create(ctx,
922 					&sess_conf, sess_mp, priv_mp);
923 	}
924 #endif
925 	sess = rte_cryptodev_sym_session_create(sess_mp);
926 	/*
927 	 * cipher only
928 	 */
929 	if (options->op_type == CPERF_CIPHER_ONLY) {
930 		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
931 		cipher_xform.next = NULL;
932 		cipher_xform.cipher.algo = options->cipher_algo;
933 		cipher_xform.cipher.op = options->cipher_op;
934 		cipher_xform.cipher.iv.offset = iv_offset;
935 
936 		/* cipher different than null */
937 		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
938 			cipher_xform.cipher.key.data =
939 					test_vector->cipher_key.data;
940 			cipher_xform.cipher.key.length =
941 					test_vector->cipher_key.length;
942 			cipher_xform.cipher.iv.length =
943 					test_vector->cipher_iv.length;
944 		} else {
945 			cipher_xform.cipher.key.data = NULL;
946 			cipher_xform.cipher.key.length = 0;
947 			cipher_xform.cipher.iv.length = 0;
948 		}
949 		/* create crypto session */
950 		rte_cryptodev_sym_session_init(dev_id, sess, &cipher_xform,
951 				priv_mp);
952 	/*
953 	 *  auth only
954 	 */
955 	} else if (options->op_type == CPERF_AUTH_ONLY) {
956 		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
957 		auth_xform.next = NULL;
958 		auth_xform.auth.algo = options->auth_algo;
959 		auth_xform.auth.op = options->auth_op;
960 		auth_xform.auth.iv.offset = iv_offset;
961 
962 		/* auth different than null */
963 		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
964 			auth_xform.auth.digest_length =
965 					options->digest_sz;
966 			auth_xform.auth.key.length =
967 					test_vector->auth_key.length;
968 			auth_xform.auth.key.data = test_vector->auth_key.data;
969 			auth_xform.auth.iv.length =
970 					test_vector->auth_iv.length;
971 		} else {
972 			auth_xform.auth.digest_length = 0;
973 			auth_xform.auth.key.length = 0;
974 			auth_xform.auth.key.data = NULL;
975 			auth_xform.auth.iv.length = 0;
976 		}
977 		/* create crypto session */
978 		rte_cryptodev_sym_session_init(dev_id, sess, &auth_xform,
979 				priv_mp);
980 	/*
981 	 * cipher and auth
982 	 */
983 	} else if (options->op_type == CPERF_CIPHER_THEN_AUTH
984 			|| options->op_type == CPERF_AUTH_THEN_CIPHER) {
985 		/*
986 		 * cipher
987 		 */
988 		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
989 		cipher_xform.next = NULL;
990 		cipher_xform.cipher.algo = options->cipher_algo;
991 		cipher_xform.cipher.op = options->cipher_op;
992 		cipher_xform.cipher.iv.offset = iv_offset;
993 
994 		/* cipher different than null */
995 		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
996 			cipher_xform.cipher.key.data =
997 					test_vector->cipher_key.data;
998 			cipher_xform.cipher.key.length =
999 					test_vector->cipher_key.length;
1000 			cipher_xform.cipher.iv.length =
1001 					test_vector->cipher_iv.length;
1002 		} else {
1003 			cipher_xform.cipher.key.data = NULL;
1004 			cipher_xform.cipher.key.length = 0;
1005 			cipher_xform.cipher.iv.length = 0;
1006 		}
1007 
1008 		/*
1009 		 * auth
1010 		 */
1011 		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1012 		auth_xform.next = NULL;
1013 		auth_xform.auth.algo = options->auth_algo;
1014 		auth_xform.auth.op = options->auth_op;
1015 		auth_xform.auth.iv.offset = iv_offset +
1016 			cipher_xform.cipher.iv.length;
1017 
1018 		/* auth different than null */
1019 		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
1020 			auth_xform.auth.digest_length = options->digest_sz;
1021 			auth_xform.auth.iv.length = test_vector->auth_iv.length;
1022 			auth_xform.auth.key.length =
1023 					test_vector->auth_key.length;
1024 			auth_xform.auth.key.data =
1025 					test_vector->auth_key.data;
1026 		} else {
1027 			auth_xform.auth.digest_length = 0;
1028 			auth_xform.auth.key.length = 0;
1029 			auth_xform.auth.key.data = NULL;
1030 			auth_xform.auth.iv.length = 0;
1031 		}
1032 
1033 		/* cipher then auth */
1034 		if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
1035 			cipher_xform.next = &auth_xform;
1036 			/* create crypto session */
1037 			rte_cryptodev_sym_session_init(dev_id,
1038 					sess, &cipher_xform, priv_mp);
1039 		} else { /* auth then cipher */
1040 			auth_xform.next = &cipher_xform;
1041 			/* create crypto session */
1042 			rte_cryptodev_sym_session_init(dev_id,
1043 					sess, &auth_xform, priv_mp);
1044 		}
1045 	} else { /* options->op_type == CPERF_AEAD */
1046 		aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
1047 		aead_xform.next = NULL;
1048 		aead_xform.aead.algo = options->aead_algo;
1049 		aead_xform.aead.op = options->aead_op;
1050 		aead_xform.aead.iv.offset = iv_offset;
1051 
1052 		aead_xform.aead.key.data =
1053 					test_vector->aead_key.data;
1054 		aead_xform.aead.key.length =
1055 					test_vector->aead_key.length;
1056 		aead_xform.aead.iv.length = test_vector->aead_iv.length;
1057 
1058 		aead_xform.aead.digest_length = options->digest_sz;
1059 		aead_xform.aead.aad_length =
1060 					options->aead_aad_sz;
1061 
1062 		/* Create crypto session */
1063 		rte_cryptodev_sym_session_init(dev_id,
1064 					sess, &aead_xform, priv_mp);
1065 	}
1066 
1067 	return sess;
1068 }
1069 
1070 int
1071 cperf_get_op_functions(const struct cperf_options *options,
1072 		struct cperf_op_fns *op_fns)
1073 {
1074 	memset(op_fns, 0, sizeof(struct cperf_op_fns));
1075 
1076 	op_fns->sess_create = cperf_create_session;
1077 
1078 	switch (options->op_type) {
1079 	case CPERF_AEAD:
1080 		op_fns->populate_ops = cperf_set_ops_aead;
1081 		break;
1082 
1083 	case CPERF_AUTH_THEN_CIPHER:
1084 	case CPERF_CIPHER_THEN_AUTH:
1085 		op_fns->populate_ops = cperf_set_ops_cipher_auth;
1086 		break;
1087 	case CPERF_AUTH_ONLY:
1088 		if (options->auth_algo == RTE_CRYPTO_AUTH_NULL)
1089 			op_fns->populate_ops = cperf_set_ops_null_auth;
1090 		else
1091 			op_fns->populate_ops = cperf_set_ops_auth;
1092 		break;
1093 	case CPERF_CIPHER_ONLY:
1094 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL)
1095 			op_fns->populate_ops = cperf_set_ops_null_cipher;
1096 		else
1097 			op_fns->populate_ops = cperf_set_ops_cipher;
1098 		break;
1099 	case CPERF_ASYM_MODEX:
1100 		op_fns->populate_ops = cperf_set_ops_asym;
1101 		break;
1102 #ifdef RTE_LIB_SECURITY
1103 	case CPERF_PDCP:
1104 	case CPERF_DOCSIS:
1105 		op_fns->populate_ops = cperf_set_ops_security;
1106 		break;
1107 	case CPERF_IPSEC:
1108 		op_fns->populate_ops = cperf_set_ops_security_ipsec;
1109 		break;
1110 #endif
1111 	default:
1112 		return -1;
1113 	}
1114 
1115 	return 0;
1116 }
1117