xref: /dpdk/app/test-crypto-perf/cperf_ops.c (revision 14864c42)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4 
5 #include <rte_cryptodev.h>
6 #include <rte_ether.h>
7 #include <rte_ip.h>
8 
9 #include "cperf_ops.h"
10 #include "cperf_test_vectors.h"
11 
12 static int
cperf_set_ops_asym(struct rte_crypto_op ** ops,uint32_t src_buf_offset __rte_unused,uint32_t dst_buf_offset __rte_unused,uint16_t nb_ops,struct rte_cryptodev_sym_session * sess,const struct cperf_options * options,const struct cperf_test_vector * test_vector __rte_unused,uint16_t iv_offset __rte_unused,uint32_t * imix_idx __rte_unused,uint64_t * tsc_start __rte_unused)13 cperf_set_ops_asym(struct rte_crypto_op **ops,
14 		   uint32_t src_buf_offset __rte_unused,
15 		   uint32_t dst_buf_offset __rte_unused, uint16_t nb_ops,
16 		   struct rte_cryptodev_sym_session *sess,
17 		   const struct cperf_options *options,
18 		   const struct cperf_test_vector *test_vector __rte_unused,
19 		   uint16_t iv_offset __rte_unused,
20 		   uint32_t *imix_idx __rte_unused,
21 		   uint64_t *tsc_start __rte_unused)
22 {
23 	uint16_t i;
24 	void *asym_sess = (void *)sess;
25 
26 	for (i = 0; i < nb_ops; i++) {
27 		struct rte_crypto_asym_op *asym_op = ops[i]->asym;
28 
29 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
30 		asym_op->modex.base.data = options->modex_data->base.data;
31 		asym_op->modex.base.length = options->modex_data->base.len;
32 		asym_op->modex.result.data = options->modex_data->result.data;
33 		asym_op->modex.result.length = options->modex_data->result.len;
34 		rte_crypto_op_attach_asym_session(ops[i], asym_sess);
35 	}
36 	return 0;
37 }
38 
39 #ifdef RTE_LIB_SECURITY
40 static void
test_ipsec_vec_populate(struct rte_mbuf * m,const struct cperf_options * options,const struct cperf_test_vector * test_vector)41 test_ipsec_vec_populate(struct rte_mbuf *m, const struct cperf_options *options,
42 			const struct cperf_test_vector *test_vector)
43 {
44 	struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);
45 
46 	if ((options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ||
47 		(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
48 		memcpy(ip, test_vector->plaintext.data,
49 		       sizeof(struct rte_ipv4_hdr));
50 
51 		ip->total_length = rte_cpu_to_be_16(m->data_len);
52 	}
53 }
54 
55 static int
cperf_set_ops_security(struct rte_crypto_op ** ops,uint32_t src_buf_offset __rte_unused,uint32_t dst_buf_offset __rte_unused,uint16_t nb_ops,struct rte_cryptodev_sym_session * sess,const struct cperf_options * options,const struct cperf_test_vector * test_vector,uint16_t iv_offset __rte_unused,uint32_t * imix_idx,uint64_t * tsc_start)56 cperf_set_ops_security(struct rte_crypto_op **ops,
57 		uint32_t src_buf_offset __rte_unused,
58 		uint32_t dst_buf_offset __rte_unused,
59 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
60 		const struct cperf_options *options,
61 		const struct cperf_test_vector *test_vector,
62 		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
63 		uint64_t *tsc_start)
64 {
65 	uint16_t i;
66 
67 	for (i = 0; i < nb_ops; i++) {
68 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
69 		struct rte_security_session *sec_sess =
70 			(struct rte_security_session *)sess;
71 		uint32_t buf_sz;
72 
73 		uint32_t *per_pkt_hfn = rte_crypto_op_ctod_offset(ops[i],
74 					uint32_t *, iv_offset);
75 		*per_pkt_hfn = options->pdcp_ses_hfn_en ? 0 : PDCP_DEFAULT_HFN;
76 
77 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
78 		rte_security_attach_session(ops[i], sec_sess);
79 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
80 							src_buf_offset);
81 
82 		if (options->op_type == CPERF_PDCP) {
83 			sym_op->m_src->buf_len = options->segment_sz;
84 			sym_op->m_src->data_len = options->test_buffer_size;
85 			sym_op->m_src->pkt_len = sym_op->m_src->data_len;
86 		}
87 
88 		if (options->op_type == CPERF_DOCSIS) {
89 			if (options->imix_distribution_count) {
90 				buf_sz = options->imix_buffer_sizes[*imix_idx];
91 				*imix_idx = (*imix_idx + 1) % options->pool_sz;
92 			} else
93 				buf_sz = options->test_buffer_size;
94 
95 			sym_op->m_src->buf_len = options->segment_sz;
96 			sym_op->m_src->data_len = buf_sz;
97 			sym_op->m_src->pkt_len = buf_sz;
98 
99 			/* DOCSIS header is not CRC'ed */
100 			sym_op->auth.data.offset = options->docsis_hdr_sz;
101 			sym_op->auth.data.length = buf_sz -
102 				sym_op->auth.data.offset - RTE_ETHER_CRC_LEN;
103 			/*
104 			 * DOCSIS header and SRC and DST MAC addresses are not
105 			 * ciphered
106 			 */
107 			sym_op->cipher.data.offset = sym_op->auth.data.offset +
108 				RTE_ETHER_HDR_LEN - RTE_ETHER_TYPE_LEN;
109 			sym_op->cipher.data.length = buf_sz -
110 				sym_op->cipher.data.offset;
111 		}
112 
113 		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
114 		if (dst_buf_offset == 0)
115 			sym_op->m_dst = NULL;
116 		else
117 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
118 							dst_buf_offset);
119 	}
120 
121 	RTE_SET_USED(tsc_start);
122 	RTE_SET_USED(test_vector);
123 
124 	return 0;
125 }
126 
127 static int
cperf_set_ops_security_ipsec(struct rte_crypto_op ** ops,uint32_t src_buf_offset __rte_unused,uint32_t dst_buf_offset __rte_unused,uint16_t nb_ops,struct rte_cryptodev_sym_session * sess,const struct cperf_options * options,const struct cperf_test_vector * test_vector,uint16_t iv_offset __rte_unused,uint32_t * imix_idx,uint64_t * tsc_start)128 cperf_set_ops_security_ipsec(struct rte_crypto_op **ops,
129 		uint32_t src_buf_offset __rte_unused,
130 		uint32_t dst_buf_offset __rte_unused,
131 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
132 		const struct cperf_options *options,
133 		const struct cperf_test_vector *test_vector,
134 		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
135 		uint64_t *tsc_start)
136 {
137 	struct rte_security_session *sec_sess =
138 			(struct rte_security_session *)sess;
139 	const uint32_t test_buffer_size = options->test_buffer_size;
140 	const uint32_t headroom_sz = options->headroom_sz;
141 	const uint32_t segment_sz = options->segment_sz;
142 	uint64_t tsc_start_temp, tsc_end_temp;
143 	uint16_t i = 0;
144 
145 	RTE_SET_USED(imix_idx);
146 
147 	for (i = 0; i < nb_ops; i++) {
148 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
149 		struct rte_mbuf *m = sym_op->m_src;
150 
151 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
152 		rte_security_attach_session(ops[i], sec_sess);
153 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
154 							src_buf_offset);
155 
156 		/* In case of IPsec, headroom is consumed by PMD,
157 		 * hence resetting it.
158 		 */
159 		m->data_off = headroom_sz;
160 
161 		m->buf_len = segment_sz;
162 		m->data_len = test_buffer_size;
163 		m->pkt_len = test_buffer_size;
164 
165 		sym_op->m_dst = NULL;
166 	}
167 
168 	if (options->test_file != NULL)
169 		return 0;
170 
171 	tsc_start_temp = rte_rdtsc_precise();
172 
173 	for (i = 0; i < nb_ops; i++) {
174 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
175 		struct rte_mbuf *m = sym_op->m_src;
176 
177 		test_ipsec_vec_populate(m, options, test_vector);
178 	}
179 
180 	tsc_end_temp = rte_rdtsc_precise();
181 	*tsc_start += tsc_end_temp - tsc_start_temp;
182 
183 	return 0;
184 }
185 
186 #endif
187 
188 static int
cperf_set_ops_null_cipher(struct rte_crypto_op ** ops,uint32_t src_buf_offset,uint32_t dst_buf_offset,uint16_t nb_ops,struct rte_cryptodev_sym_session * sess,const struct cperf_options * options,const struct cperf_test_vector * test_vector __rte_unused,uint16_t iv_offset __rte_unused,uint32_t * imix_idx,uint64_t * tsc_start __rte_unused)189 cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
190 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
191 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
192 		const struct cperf_options *options,
193 		const struct cperf_test_vector *test_vector __rte_unused,
194 		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
195 		uint64_t *tsc_start __rte_unused)
196 {
197 	uint16_t i;
198 
199 	for (i = 0; i < nb_ops; i++) {
200 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
201 
202 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
203 		rte_crypto_op_attach_sym_session(ops[i], sess);
204 
205 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
206 							src_buf_offset);
207 
208 		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
209 		if (dst_buf_offset == 0)
210 			sym_op->m_dst = NULL;
211 		else
212 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
213 							dst_buf_offset);
214 
215 		/* cipher parameters */
216 		if (options->imix_distribution_count) {
217 			sym_op->cipher.data.length =
218 				options->imix_buffer_sizes[*imix_idx];
219 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
220 		} else
221 			sym_op->cipher.data.length = options->test_buffer_size;
222 		sym_op->cipher.data.offset = 0;
223 	}
224 
225 	return 0;
226 }
227 
228 static int
cperf_set_ops_null_auth(struct rte_crypto_op ** ops,uint32_t src_buf_offset,uint32_t dst_buf_offset,uint16_t nb_ops,struct rte_cryptodev_sym_session * sess,const struct cperf_options * options,const struct cperf_test_vector * test_vector __rte_unused,uint16_t iv_offset __rte_unused,uint32_t * imix_idx,uint64_t * tsc_start __rte_unused)229 cperf_set_ops_null_auth(struct rte_crypto_op **ops,
230 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
231 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
232 		const struct cperf_options *options,
233 		const struct cperf_test_vector *test_vector __rte_unused,
234 		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
235 		uint64_t *tsc_start __rte_unused)
236 {
237 	uint16_t i;
238 
239 	for (i = 0; i < nb_ops; i++) {
240 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
241 
242 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
243 		rte_crypto_op_attach_sym_session(ops[i], sess);
244 
245 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
246 							src_buf_offset);
247 
248 		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
249 		if (dst_buf_offset == 0)
250 			sym_op->m_dst = NULL;
251 		else
252 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
253 							dst_buf_offset);
254 
255 		/* auth parameters */
256 		if (options->imix_distribution_count) {
257 			sym_op->auth.data.length =
258 				options->imix_buffer_sizes[*imix_idx];
259 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
260 		} else
261 			sym_op->auth.data.length = options->test_buffer_size;
262 		sym_op->auth.data.offset = 0;
263 	}
264 
265 	return 0;
266 }
267 
268 static int
cperf_set_ops_cipher(struct rte_crypto_op ** ops,uint32_t src_buf_offset,uint32_t dst_buf_offset,uint16_t nb_ops,struct rte_cryptodev_sym_session * sess,const struct cperf_options * options,const struct cperf_test_vector * test_vector,uint16_t iv_offset,uint32_t * imix_idx,uint64_t * tsc_start __rte_unused)269 cperf_set_ops_cipher(struct rte_crypto_op **ops,
270 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
271 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
272 		const struct cperf_options *options,
273 		const struct cperf_test_vector *test_vector,
274 		uint16_t iv_offset, uint32_t *imix_idx,
275 		uint64_t *tsc_start __rte_unused)
276 {
277 	uint16_t i;
278 
279 	for (i = 0; i < nb_ops; i++) {
280 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
281 
282 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
283 		rte_crypto_op_attach_sym_session(ops[i], sess);
284 
285 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
286 							src_buf_offset);
287 
288 		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
289 		if (dst_buf_offset == 0)
290 			sym_op->m_dst = NULL;
291 		else
292 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
293 							dst_buf_offset);
294 
295 		/* cipher parameters */
296 		if (options->imix_distribution_count) {
297 			sym_op->cipher.data.length =
298 				options->imix_buffer_sizes[*imix_idx];
299 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
300 		} else
301 			sym_op->cipher.data.length = options->test_buffer_size;
302 
303 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
304 				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
305 				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
306 			sym_op->cipher.data.length <<= 3;
307 
308 		sym_op->cipher.data.offset = 0;
309 	}
310 
311 	if (options->test == CPERF_TEST_TYPE_VERIFY) {
312 		for (i = 0; i < nb_ops; i++) {
313 			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
314 					uint8_t *, iv_offset);
315 
316 			memcpy(iv_ptr, test_vector->cipher_iv.data,
317 					test_vector->cipher_iv.length);
318 
319 		}
320 	}
321 
322 	return 0;
323 }
324 
/**
 * Fill a burst of symmetric crypto ops for an auth-only test.
 *
 * Each op is reset, attached to @sess and wired to the mbufs co-located
 * with the op. In VERIFY direction the reference digest from the test
 * vector is used; in GENERATE direction the digest is placed in the mbuf
 * chain right after the payload, walking segments as needed. The auth
 * length comes from the IMIX distribution or the fixed test buffer size
 * and is converted to bits for SNOW3G/KASUMI/ZUC. In verify-mode tests
 * the auth IV (if any) is copied into every op's IV area.
 *
 * Always returns 0.
 */
static int
cperf_set_ops_auth(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		/* Source mbuf is co-located at a fixed offset from the op. */
		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* Copy the auth IV into the op, when the algo uses one. */
		if (test_vector->auth_iv.length) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
								uint8_t *,
								iv_offset);
			memcpy(iv_ptr, test_vector->auth_iv.data,
					test_vector->auth_iv.length);
		}

		/* authentication parameters */
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			/* Verify against the precomputed reference digest. */
			sym_op->auth.digest.data = test_vector->digest.data;
			sym_op->auth.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {

			/* Generate: digest goes right after the payload. */
			uint32_t offset = options->test_buffer_size;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				/* Walk the segment chain to the segment
				 * holding byte 'offset' of the payload.
				 */
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in segment,
				 * place the digest in the next segment
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->auth.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);

		}

		if (options->imix_distribution_count) {
			sym_op->auth.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->auth.data.length = options->test_buffer_size;

		/* Wireless algorithms express the auth length in bits. */
		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
			sym_op->auth.data.length <<= 3;

		sym_op->auth.data.offset = 0;
	}

	/* Verify-mode tests (re)load the reference auth IV per op. */
	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		if (test_vector->auth_iv.length) {
			for (i = 0; i < nb_ops; i++) {
				uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
						uint8_t *, iv_offset);

				memcpy(iv_ptr, test_vector->auth_iv.data,
						test_vector->auth_iv.length);
			}
		}
	}
	return 0;
}
425 
/**
 * Fill a burst of symmetric crypto ops for a chained cipher+auth test.
 *
 * Combines the cipher-only and auth-only setup: cipher and auth regions
 * both start at offset 0 with a length from the IMIX distribution or the
 * fixed test buffer size (converted to bits for SNOW3G/KASUMI/ZUC). The
 * digest is either the reference digest (VERIFY) or placed after the
 * payload in the mbuf chain (GENERATE). In verify-mode tests the cipher
 * IV — and the auth IV right after it, when present — are copied into
 * each op's IV area.
 *
 * Always returns 0.
 */
static int
cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		/* Source mbuf is co-located at a fixed offset from the op. */
		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* cipher parameters */
		if (options->imix_distribution_count) {
			sym_op->cipher.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->cipher.data.length = options->test_buffer_size;

		/* Wireless algorithms express the cipher length in bits. */
		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
			sym_op->cipher.data.length <<= 3;

		sym_op->cipher.data.offset = 0;

		/* authentication parameters */
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			/* Verify against the precomputed reference digest. */
			sym_op->auth.digest.data = test_vector->digest.data;
			sym_op->auth.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {

			/* Generate: digest goes right after the payload. */
			uint32_t offset = options->test_buffer_size;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				/* Walk the segment chain to the segment
				 * holding byte 'offset' of the payload.
				 */
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in segment,
				 * place the digest in the next segment
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->auth.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);
		}

		if (options->imix_distribution_count) {
			sym_op->auth.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->auth.data.length = options->test_buffer_size;

		/* Wireless algorithms express the auth length in bits. */
		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
			sym_op->auth.data.length <<= 3;

		sym_op->auth.data.offset = 0;
	}

	/* Verify-mode tests need the reference IVs in every op. */
	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		for (i = 0; i < nb_ops; i++) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, iv_offset);

			memcpy(iv_ptr, test_vector->cipher_iv.data,
					test_vector->cipher_iv.length);
			if (test_vector->auth_iv.length) {
				/*
				 * Copy IV after the crypto operation and
				 * the cipher IV
				 */
				iv_ptr += test_vector->cipher_iv.length;
				memcpy(iv_ptr, test_vector->auth_iv.data,
						test_vector->auth_iv.length);
			}
		}

	}

	return 0;
}
541 
/**
 * Fill a burst of symmetric crypto ops for an AEAD test.
 *
 * The AAD lives in the op's private area right after the (16-byte
 * aligned) IV. Each op is reset, attached to @sess and wired to the
 * mbufs co-located with the op; the AEAD length comes from the IMIX
 * distribution or the fixed test buffer size. On DECRYPT the reference
 * digest from the test vector is used; on ENCRYPT the digest is placed
 * after the payload in the mbuf chain. Verify/latency tests also copy
 * the IV and AAD into each op, with the AES-CCM-specific placement
 * offsets (+1 for the nonce, +18 for the AAD).
 *
 * Always returns 0.
 */
static int
cperf_set_ops_aead(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;
	/* AAD is placed after the IV */
	uint16_t aad_offset = iv_offset +
			RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16);

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		/* Source mbuf is co-located at a fixed offset from the op. */
		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* AEAD parameters */
		if (options->imix_distribution_count) {
			sym_op->aead.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->aead.data.length = options->test_buffer_size;
		sym_op->aead.data.offset = 0;

		/* AAD buffer lives inside the op's private data area. */
		sym_op->aead.aad.data = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, aad_offset);
		sym_op->aead.aad.phys_addr = rte_crypto_op_ctophys_offset(ops[i],
					aad_offset);

		if (options->aead_op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
			/* Decrypt verifies the precomputed reference digest. */
			sym_op->aead.digest.data = test_vector->digest.data;
			sym_op->aead.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {

			/* Encrypt: digest goes right after the AEAD region. */
			uint32_t offset = sym_op->aead.data.length +
						sym_op->aead.data.offset;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				/* Walk the segment chain to the segment
				 * holding byte 'offset' of the payload.
				 */
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in segment,
				 * place the digest in the next segment
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->aead.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);
		}
	}

	/* Verify and latency tests need the reference IV/AAD in each op. */
	if ((options->test == CPERF_TEST_TYPE_VERIFY) ||
			(options->test == CPERF_TEST_TYPE_LATENCY)) {
		for (i = 0; i < nb_ops; i++) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, iv_offset);

			/*
			 * If doing AES-CCM, nonce is copied one byte
			 * after the start of IV field, and AAD is copied
			 * 18 bytes after the start of the AAD field.
			 */
			if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
				memcpy(iv_ptr + 1, test_vector->aead_iv.data,
					test_vector->aead_iv.length);

				memcpy(ops[i]->sym->aead.aad.data + 18,
					test_vector->aad.data,
					test_vector->aad.length);
			} else {
				memcpy(iv_ptr, test_vector->aead_iv.data,
					test_vector->aead_iv.length);

				memcpy(ops[i]->sym->aead.aad.data,
					test_vector->aad.data,
					test_vector->aad.length);
			}
		}
	}

	return 0;
}
654 
/**
 * Build a lookaside-protocol IPsec (ESP, tunnel mode) security session.
 *
 * Chooses the crypto transform from @options: a single AEAD xform when an
 * AEAD algorithm is configured, otherwise a cipher xform chained to an
 * auth xform; returns NULL if neither combination is configured. The SA
 * direction (egress/ingress) is derived from the encrypt/generate vs.
 * decrypt/verify operation settings. The lcore id is used as the SPI so
 * every core gets a distinct session.
 *
 * Returns the new session (a rte_security_session cast to the generic
 * sym-session pointer used by the cperf op-population callbacks), or
 * NULL on invalid configuration / creation failure.
 */
static struct rte_cryptodev_sym_session *
create_ipsec_session(struct rte_mempool *sess_mp,
		struct rte_mempool *priv_mp,
		uint8_t dev_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset)
{
	struct rte_crypto_sym_xform xform = {0};
	struct rte_crypto_sym_xform auth_xform = {0};

	if (options->aead_algo != 0) {
		/* Setup AEAD Parameters */
		xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
		xform.next = NULL;
		xform.aead.algo = options->aead_algo;
		xform.aead.op = options->aead_op;
		xform.aead.iv.offset = iv_offset;
		xform.aead.key.data = test_vector->aead_key.data;
		xform.aead.key.length = test_vector->aead_key.length;
		xform.aead.iv.length = test_vector->aead_iv.length;
		xform.aead.digest_length = options->digest_sz;
		xform.aead.aad_length = options->aead_aad_sz;
	} else if (options->cipher_algo != 0 && options->auth_algo != 0) {
		/* Setup Cipher Parameters */
		xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		xform.next = NULL;
		xform.cipher.algo = options->cipher_algo;
		xform.cipher.op = options->cipher_op;
		xform.cipher.iv.offset = iv_offset;
		xform.cipher.iv.length = test_vector->cipher_iv.length;
		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			xform.cipher.key.data = test_vector->cipher_key.data;
			xform.cipher.key.length =
				test_vector->cipher_key.length;
		} else {
			xform.cipher.key.data = NULL;
			xform.cipher.key.length = 0;
		}

		/* Setup Auth Parameters */
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.next = NULL;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
		/* Auth IV follows the cipher IV in the op's IV area. */
		auth_xform.auth.iv.offset = iv_offset +
				xform.cipher.iv.length;
		/* auth different than null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length = options->digest_sz;
			auth_xform.auth.key.length =
						test_vector->auth_key.length;
			auth_xform.auth.key.data = test_vector->auth_key.data;
			auth_xform.auth.iv.length = test_vector->auth_iv.length;
		} else {
			auth_xform.auth.digest_length = 0;
			auth_xform.auth.key.length = 0;
			auth_xform.auth.key.data = NULL;
			auth_xform.auth.iv.length = 0;
		}

		/* Chain cipher -> auth. */
		xform.next = &auth_xform;
	} else {
		/* Neither AEAD nor cipher+auth configured: unsupported. */
		return NULL;
	}

#define CPERF_IPSEC_SRC_IP	0x01010101
#define CPERF_IPSEC_DST_IP	0x02020202
#define CPERF_IPSEC_SALT	0x0
#define CPERF_IPSEC_DEFTTL	64
	struct rte_security_ipsec_tunnel_param tunnel = {
		.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
		{.ipv4 = {
			.src_ip = { .s_addr = CPERF_IPSEC_SRC_IP},
			.dst_ip = { .s_addr = CPERF_IPSEC_DST_IP},
			.dscp = 0,
			.df = 0,
			.ttl = CPERF_IPSEC_DEFTTL,
		} },
	};
	struct rte_security_session_conf sess_conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		{.ipsec = {
			.spi = rte_lcore_id(),
			/**< For testing sake, lcore_id is taken as SPI so that
			 * for every core a different session is created.
			 */
			.salt = CPERF_IPSEC_SALT,
			.options = { 0 },
			.replay_win_sz = 0,
			/* Encrypt/generate => egress SA, else ingress. */
			.direction =
				((options->cipher_op ==
					RTE_CRYPTO_CIPHER_OP_ENCRYPT) &&
				(options->auth_op ==
					RTE_CRYPTO_AUTH_OP_GENERATE)) ||
				(options->aead_op ==
					RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS :
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.tunnel = tunnel,
		} },
		.userdata = NULL,
		.crypto_xform = &xform
	};

	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
				rte_cryptodev_get_sec_ctx(dev_id);

	/* Create security session */
	return (void *)rte_security_session_create(ctx,
				&sess_conf, sess_mp, priv_mp);
}
771 
772 static struct rte_cryptodev_sym_session *
cperf_create_session(struct rte_mempool * sess_mp,struct rte_mempool * priv_mp,uint8_t dev_id,const struct cperf_options * options,const struct cperf_test_vector * test_vector,uint16_t iv_offset)773 cperf_create_session(struct rte_mempool *sess_mp,
774 	struct rte_mempool *priv_mp,
775 	uint8_t dev_id,
776 	const struct cperf_options *options,
777 	const struct cperf_test_vector *test_vector,
778 	uint16_t iv_offset)
779 {
780 	struct rte_crypto_sym_xform cipher_xform;
781 	struct rte_crypto_sym_xform auth_xform;
782 	struct rte_crypto_sym_xform aead_xform;
783 	struct rte_cryptodev_sym_session *sess = NULL;
784 	void *asym_sess = NULL;
785 	struct rte_crypto_asym_xform xform = {0};
786 	int ret;
787 
788 	if (options->op_type == CPERF_ASYM_MODEX) {
789 		xform.next = NULL;
790 		xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
791 		xform.modex.modulus.data = options->modex_data->modulus.data;
792 		xform.modex.modulus.length = options->modex_data->modulus.len;
793 		xform.modex.exponent.data = options->modex_data->exponent.data;
794 		xform.modex.exponent.length = options->modex_data->exponent.len;
795 
796 		ret = rte_cryptodev_asym_session_create(dev_id, &xform,
797 				sess_mp, &asym_sess);
798 		if (ret < 0) {
799 			RTE_LOG(ERR, USER1, "Asym session create failed\n");
800 			return NULL;
801 		}
802 		return asym_sess;
803 	}
804 #ifdef RTE_LIB_SECURITY
805 	/*
806 	 * security only
807 	 */
808 	if (options->op_type == CPERF_PDCP) {
809 		/* Setup Cipher Parameters */
810 		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
811 		cipher_xform.next = NULL;
812 		cipher_xform.cipher.algo = options->cipher_algo;
813 		cipher_xform.cipher.op = options->cipher_op;
814 		cipher_xform.cipher.iv.offset = iv_offset;
815 		cipher_xform.cipher.iv.length = 4;
816 
817 		/* cipher different than null */
818 		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
819 			cipher_xform.cipher.key.data = test_vector->cipher_key.data;
820 			cipher_xform.cipher.key.length = test_vector->cipher_key.length;
821 		} else {
822 			cipher_xform.cipher.key.data = NULL;
823 			cipher_xform.cipher.key.length = 0;
824 		}
825 
826 		/* Setup Auth Parameters */
827 		if (options->auth_algo != 0) {
828 			auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
829 			auth_xform.next = NULL;
830 			auth_xform.auth.algo = options->auth_algo;
831 			auth_xform.auth.op = options->auth_op;
832 			auth_xform.auth.iv.offset = iv_offset +
833 				cipher_xform.cipher.iv.length;
834 
835 			/* auth different than null */
836 			if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
837 				auth_xform.auth.digest_length = options->digest_sz;
838 				auth_xform.auth.key.length = test_vector->auth_key.length;
839 				auth_xform.auth.key.data = test_vector->auth_key.data;
840 				auth_xform.auth.iv.length = test_vector->auth_iv.length;
841 			} else {
842 				auth_xform.auth.digest_length = 0;
843 				auth_xform.auth.key.length = 0;
844 				auth_xform.auth.key.data = NULL;
845 				auth_xform.auth.iv.length = 0;
846 			}
847 
848 			cipher_xform.next = &auth_xform;
849 		} else {
850 			cipher_xform.next = NULL;
851 		}
852 
853 		struct rte_security_session_conf sess_conf = {
854 			.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
855 			.protocol = RTE_SECURITY_PROTOCOL_PDCP,
856 			{.pdcp = {
857 				.bearer = 0x16,
858 				.domain = options->pdcp_domain,
859 				.pkt_dir = 0,
860 				.sn_size = options->pdcp_sn_sz,
861 				.hfn = options->pdcp_ses_hfn_en ?
862 					PDCP_DEFAULT_HFN : 0,
863 				.hfn_threshold = 0x70C0A,
864 				.hfn_ovrd = !(options->pdcp_ses_hfn_en),
865 			} },
866 			.crypto_xform = &cipher_xform
867 		};
868 
869 		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
870 					rte_cryptodev_get_sec_ctx(dev_id);
871 
872 		/* Create security session */
873 		return (void *)rte_security_session_create(ctx,
874 					&sess_conf, sess_mp, priv_mp);
875 	}
876 
877 	if (options->op_type == CPERF_IPSEC) {
878 		return create_ipsec_session(sess_mp, priv_mp, dev_id,
879 				options, test_vector, iv_offset);
880 	}
881 
882 	if (options->op_type == CPERF_DOCSIS) {
883 		enum rte_security_docsis_direction direction;
884 
885 		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
886 		cipher_xform.next = NULL;
887 		cipher_xform.cipher.algo = options->cipher_algo;
888 		cipher_xform.cipher.op = options->cipher_op;
889 		cipher_xform.cipher.iv.offset = iv_offset;
890 		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
891 			cipher_xform.cipher.key.data =
892 				test_vector->cipher_key.data;
893 			cipher_xform.cipher.key.length =
894 				test_vector->cipher_key.length;
895 			cipher_xform.cipher.iv.length =
896 				test_vector->cipher_iv.length;
897 		} else {
898 			cipher_xform.cipher.key.data = NULL;
899 			cipher_xform.cipher.key.length = 0;
900 			cipher_xform.cipher.iv.length = 0;
901 		}
902 		cipher_xform.next = NULL;
903 
904 		if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
905 			direction = RTE_SECURITY_DOCSIS_DOWNLINK;
906 		else
907 			direction = RTE_SECURITY_DOCSIS_UPLINK;
908 
909 		struct rte_security_session_conf sess_conf = {
910 			.action_type =
911 				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
912 			.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,
913 			{.docsis = {
914 				.direction = direction,
915 			} },
916 			.crypto_xform = &cipher_xform
917 		};
918 		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
919 					rte_cryptodev_get_sec_ctx(dev_id);
920 
921 		/* Create security session */
922 		return (void *)rte_security_session_create(ctx,
923 					&sess_conf, sess_mp, priv_mp);
924 	}
925 #endif
926 	sess = rte_cryptodev_sym_session_create(sess_mp);
927 	/*
928 	 * cipher only
929 	 */
930 	if (options->op_type == CPERF_CIPHER_ONLY) {
931 		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
932 		cipher_xform.next = NULL;
933 		cipher_xform.cipher.algo = options->cipher_algo;
934 		cipher_xform.cipher.op = options->cipher_op;
935 		cipher_xform.cipher.iv.offset = iv_offset;
936 
937 		/* cipher different than null */
938 		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
939 			cipher_xform.cipher.key.data =
940 					test_vector->cipher_key.data;
941 			cipher_xform.cipher.key.length =
942 					test_vector->cipher_key.length;
943 			cipher_xform.cipher.iv.length =
944 					test_vector->cipher_iv.length;
945 		} else {
946 			cipher_xform.cipher.key.data = NULL;
947 			cipher_xform.cipher.key.length = 0;
948 			cipher_xform.cipher.iv.length = 0;
949 		}
950 		/* create crypto session */
951 		rte_cryptodev_sym_session_init(dev_id, sess, &cipher_xform,
952 				priv_mp);
953 	/*
954 	 *  auth only
955 	 */
956 	} else if (options->op_type == CPERF_AUTH_ONLY) {
957 		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
958 		auth_xform.next = NULL;
959 		auth_xform.auth.algo = options->auth_algo;
960 		auth_xform.auth.op = options->auth_op;
961 		auth_xform.auth.iv.offset = iv_offset;
962 
963 		/* auth different than null */
964 		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
965 			auth_xform.auth.digest_length =
966 					options->digest_sz;
967 			auth_xform.auth.key.length =
968 					test_vector->auth_key.length;
969 			auth_xform.auth.key.data = test_vector->auth_key.data;
970 			auth_xform.auth.iv.length =
971 					test_vector->auth_iv.length;
972 		} else {
973 			auth_xform.auth.digest_length = 0;
974 			auth_xform.auth.key.length = 0;
975 			auth_xform.auth.key.data = NULL;
976 			auth_xform.auth.iv.length = 0;
977 		}
978 		/* create crypto session */
979 		rte_cryptodev_sym_session_init(dev_id, sess, &auth_xform,
980 				priv_mp);
981 	/*
982 	 * cipher and auth
983 	 */
984 	} else if (options->op_type == CPERF_CIPHER_THEN_AUTH
985 			|| options->op_type == CPERF_AUTH_THEN_CIPHER) {
986 		/*
987 		 * cipher
988 		 */
989 		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
990 		cipher_xform.next = NULL;
991 		cipher_xform.cipher.algo = options->cipher_algo;
992 		cipher_xform.cipher.op = options->cipher_op;
993 		cipher_xform.cipher.iv.offset = iv_offset;
994 
995 		/* cipher different than null */
996 		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
997 			cipher_xform.cipher.key.data =
998 					test_vector->cipher_key.data;
999 			cipher_xform.cipher.key.length =
1000 					test_vector->cipher_key.length;
1001 			cipher_xform.cipher.iv.length =
1002 					test_vector->cipher_iv.length;
1003 		} else {
1004 			cipher_xform.cipher.key.data = NULL;
1005 			cipher_xform.cipher.key.length = 0;
1006 			cipher_xform.cipher.iv.length = 0;
1007 		}
1008 
1009 		/*
1010 		 * auth
1011 		 */
1012 		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1013 		auth_xform.next = NULL;
1014 		auth_xform.auth.algo = options->auth_algo;
1015 		auth_xform.auth.op = options->auth_op;
1016 		auth_xform.auth.iv.offset = iv_offset +
1017 			cipher_xform.cipher.iv.length;
1018 
1019 		/* auth different than null */
1020 		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
1021 			auth_xform.auth.digest_length = options->digest_sz;
1022 			auth_xform.auth.iv.length = test_vector->auth_iv.length;
1023 			auth_xform.auth.key.length =
1024 					test_vector->auth_key.length;
1025 			auth_xform.auth.key.data =
1026 					test_vector->auth_key.data;
1027 		} else {
1028 			auth_xform.auth.digest_length = 0;
1029 			auth_xform.auth.key.length = 0;
1030 			auth_xform.auth.key.data = NULL;
1031 			auth_xform.auth.iv.length = 0;
1032 		}
1033 
1034 		/* cipher then auth */
1035 		if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
1036 			cipher_xform.next = &auth_xform;
1037 			/* create crypto session */
1038 			rte_cryptodev_sym_session_init(dev_id,
1039 					sess, &cipher_xform, priv_mp);
1040 		} else { /* auth then cipher */
1041 			auth_xform.next = &cipher_xform;
1042 			/* create crypto session */
1043 			rte_cryptodev_sym_session_init(dev_id,
1044 					sess, &auth_xform, priv_mp);
1045 		}
1046 	} else { /* options->op_type == CPERF_AEAD */
1047 		aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
1048 		aead_xform.next = NULL;
1049 		aead_xform.aead.algo = options->aead_algo;
1050 		aead_xform.aead.op = options->aead_op;
1051 		aead_xform.aead.iv.offset = iv_offset;
1052 
1053 		aead_xform.aead.key.data =
1054 					test_vector->aead_key.data;
1055 		aead_xform.aead.key.length =
1056 					test_vector->aead_key.length;
1057 		aead_xform.aead.iv.length = test_vector->aead_iv.length;
1058 
1059 		aead_xform.aead.digest_length = options->digest_sz;
1060 		aead_xform.aead.aad_length =
1061 					options->aead_aad_sz;
1062 
1063 		/* Create crypto session */
1064 		rte_cryptodev_sym_session_init(dev_id,
1065 					sess, &aead_xform, priv_mp);
1066 	}
1067 
1068 	return sess;
1069 }
1070 
1071 int
cperf_get_op_functions(const struct cperf_options * options,struct cperf_op_fns * op_fns)1072 cperf_get_op_functions(const struct cperf_options *options,
1073 		struct cperf_op_fns *op_fns)
1074 {
1075 	memset(op_fns, 0, sizeof(struct cperf_op_fns));
1076 
1077 	op_fns->sess_create = cperf_create_session;
1078 
1079 	switch (options->op_type) {
1080 	case CPERF_AEAD:
1081 		op_fns->populate_ops = cperf_set_ops_aead;
1082 		break;
1083 
1084 	case CPERF_AUTH_THEN_CIPHER:
1085 	case CPERF_CIPHER_THEN_AUTH:
1086 		op_fns->populate_ops = cperf_set_ops_cipher_auth;
1087 		break;
1088 	case CPERF_AUTH_ONLY:
1089 		if (options->auth_algo == RTE_CRYPTO_AUTH_NULL)
1090 			op_fns->populate_ops = cperf_set_ops_null_auth;
1091 		else
1092 			op_fns->populate_ops = cperf_set_ops_auth;
1093 		break;
1094 	case CPERF_CIPHER_ONLY:
1095 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL)
1096 			op_fns->populate_ops = cperf_set_ops_null_cipher;
1097 		else
1098 			op_fns->populate_ops = cperf_set_ops_cipher;
1099 		break;
1100 	case CPERF_ASYM_MODEX:
1101 		op_fns->populate_ops = cperf_set_ops_asym;
1102 		break;
1103 #ifdef RTE_LIB_SECURITY
1104 	case CPERF_PDCP:
1105 	case CPERF_DOCSIS:
1106 		op_fns->populate_ops = cperf_set_ops_security;
1107 		break;
1108 	case CPERF_IPSEC:
1109 		op_fns->populate_ops = cperf_set_ops_security_ipsec;
1110 		break;
1111 #endif
1112 	default:
1113 		return -1;
1114 	}
1115 
1116 	return 0;
1117 }
1118