/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>

#include "cperf_test_common.h"

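/*
 * Per-object parameters passed to mempool_obj_init() when the pool is
 * populated. Offsets are relative to the start of each mempool object;
 * a dst_buf_offset of 0 means no out-of-place destination buffer.
 */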
struct obj_params {
	uint32_t src_buf_offset;
	uint32_t dst_buf_offset;
	uint16_t segment_sz;
	uint16_t headroom_sz;
	uint16_t data_len;
	uint16_t segments_nb;
};

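/*
 * Initialize a single-segment mbuf that lives inside a mempool object,
 * right after the (padded) crypto op. The fields are set by hand because
 * the mbuf is not allocated from a pktmbuf pool, so rte_pktmbuf_init()
 * never runs on it.
 */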
static void
fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
		void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
		uint16_t headroom, uint16_t data_len)
{
	uint32_t mbuf_hdr_size = sizeof(struct rte_mbuf);

	/* start of buffer is after mbuf structure and priv data */
	m->priv_size = 0;
	m->buf_addr = (char *)m + mbuf_hdr_size;
	m->buf_iova = rte_mempool_virt2iova(obj) +
		mbuf_offset + mbuf_hdr_size;
	m->buf_len = segment_sz;
	m->data_len = data_len;
	m->pkt_len = data_len;

	/* Use headroom specified for the buffer */
	m->data_off = headroom;

	/* init some constant fields */
	m->pool = mp;
	m->nb_segs = 1;
	m->port = 0xff;
	rte_mbuf_refcnt_set(m, 1);
	m->next = NULL;
}

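/*
 * Initialize a chain of segments_nb mbufs laid out back to back inside
 * the same mempool object: each segment occupies sizeof(struct rte_mbuf)
 * plus segment_sz bytes, and the IOVA of each next segment is derived
 * incrementally from the object's IOVA. nb_segs is written to every
 * segment for simplicity; only the head mbuf's value is meaningful.
 */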
static void
fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
		void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
		uint16_t headroom, uint16_t data_len, uint16_t segments_nb)
{
	uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
	uint16_t remaining_segments = segments_nb;
	struct rte_mbuf *next_mbuf;
	rte_iova_t next_seg_phys_addr = rte_mempool_virt2iova(obj) +
			mbuf_offset + mbuf_hdr_size;

	do {
		/* start of buffer is after mbuf structure and priv data */
		m->priv_size = 0;
		m->buf_addr = (char *)m + mbuf_hdr_size;
		m->buf_iova = next_seg_phys_addr;
		next_seg_phys_addr += mbuf_hdr_size + segment_sz;
		m->buf_len = segment_sz;
		m->data_len = data_len;

		/* Use headroom specified for the buffer */
		m->data_off = headroom;

		/* init some constant fields */
		m->pool = mp;
		m->nb_segs = segments_nb;
		m->port = 0xff;
		rte_mbuf_refcnt_set(m, 1);
		next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
				mbuf_hdr_size + segment_sz);
		m->next = next_mbuf;
		m = next_mbuf;
		remaining_segments--;

	} while (remaining_segments > 0);

	m->next = NULL;
}

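/* One-time per-object initialization for asymmetric crypto op pools. */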
static void
mempool_asym_obj_init(struct rte_mempool *mp, __rte_unused void *opaque_arg,
		void *obj, __rte_unused unsigned int i)
{
	struct rte_crypto_op *op = obj;

	/* Set crypto operation */
	op->type = RTE_CRYPTO_OP_TYPE_ASYMMETRIC;
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
	op->phys_addr = rte_mem_virt2iova(obj);
	op->mempool = mp;
}

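/*
 * One-time initialization of a symmetric crypto op object. Each mempool
 * object is laid out as:
 *
 *   | crypto op + sym op + private data (cache-line padded) |
 *   | source mbuf(s), one header + buffer per segment       |
 *   | destination mbuf (single segment, out-of-place only)  |
 *
 * so the op, its mbufs and their data buffers are allocated together.
 */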
static void
mempool_obj_init(struct rte_mempool *mp,
		void *opaque_arg,
		void *obj,
		__rte_unused unsigned int i)
{
	struct obj_params *params = opaque_arg;
	struct rte_crypto_op *op = obj;
	struct rte_mbuf *m = (struct rte_mbuf *) ((uint8_t *) obj +
			params->src_buf_offset);
	/* Set crypto operation */
	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
	op->phys_addr = rte_mem_virt2iova(obj);
	op->mempool = mp;

	/* Set source buffer */
	op->sym->m_src = m;
	if (params->segments_nb == 1)
		fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset,
				params->segment_sz, params->headroom_sz,
				params->data_len);
	else
		fill_multi_seg_mbuf(m, mp, obj, params->src_buf_offset,
				params->segment_sz, params->headroom_sz,
				params->data_len, params->segments_nb);

	/* Set destination buffer */
	if (params->dst_buf_offset) {
		m = (struct rte_mbuf *) ((uint8_t *) obj +
				params->dst_buf_offset);
		fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset,
				params->segment_sz, params->headroom_sz,
				params->data_len);
		op->sym->m_dst = m;
	} else
		op->sym->m_dst = NULL;
}

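/*
 * Create and populate the mempool shared by a device/queue-pair pair.
 * Each object bundles the crypto op with the source (and, for
 * out-of-place tests, destination) mbufs; *src_buf_offset and
 * *dst_buf_offset report where those mbufs start within each object.
 * Illustrative sizing example: with max_buffer_size = 2048,
 * digest_sz = 16 and segment_sz = 2048, max_size is 2064 and the
 * source buffer needs segments_nb = 2 segments.
 */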
int
cperf_alloc_common_memory(const struct cperf_options *options,
			const struct cperf_test_vector *test_vector,
			uint8_t dev_id, uint16_t qp_id,
			size_t extra_op_priv_size,
			uint32_t *src_buf_offset,
			uint32_t *dst_buf_offset,
			struct rte_mempool **pool)
{
	const char *mp_ops_name;
	char pool_name[32] = "";
	int ret;

	/* Calculate the object size */
	uint16_t crypto_op_size = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);
	uint16_t crypto_op_private_size;

	if (options->op_type == CPERF_ASYM_MODEX) {
		snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "perf_asym_op_pool%u",
			rte_socket_id());
		*pool = rte_crypto_op_pool_create(
			pool_name, RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
			options->pool_sz, RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
			rte_socket_id());
		if (*pool == NULL) {
			RTE_LOG(ERR, USER1,
				"Cannot allocate mempool for device %u\n",
				dev_id);
			return -1;
		}
		rte_mempool_obj_iter(*pool, mempool_asym_obj_init, NULL);
		return 0;
	}

	/*
	 * If doing AES-CCM, the IV field needs to be 16 bytes long,
	 * and the AAD field needs 18 extra bytes in addition to the
	 * AAD itself, with the total rounded up to a multiple of
	 * 16 bytes.
	 */
	if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
		crypto_op_private_size = extra_op_priv_size +
			test_vector->cipher_iv.length +
			test_vector->auth_iv.length +
			RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16) +
			RTE_ALIGN_CEIL(options->aead_aad_sz + 18, 16);
	} else {
		crypto_op_private_size = extra_op_priv_size +
			test_vector->cipher_iv.length +
			test_vector->auth_iv.length +
			test_vector->aead_iv.length +
			options->aead_aad_sz;
	}

	uint16_t crypto_op_total_size = crypto_op_size +
		crypto_op_private_size;
	uint16_t crypto_op_total_size_padded =
		RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size);
	uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz;
	uint32_t max_size = options->max_buffer_size + options->digest_sz;
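	/* Segments needed to hold the whole buffer (ceiling division) */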
	uint16_t segments_nb = (max_size % options->segment_sz) ?
			(max_size / options->segment_sz) + 1 :
			max_size / options->segment_sz;
	uint32_t obj_size = crypto_op_total_size_padded +
			(mbuf_size * segments_nb);

	snprintf(pool_name, sizeof(pool_name), "pool_cdev_%u_qp_%u",
			dev_id, qp_id);

	*src_buf_offset = crypto_op_total_size_padded;

	struct obj_params params = {
		.segment_sz = options->segment_sz,
		.headroom_sz = options->headroom_sz,
		/* Data len = segment size - (headroom + tailroom) */
		.data_len = options->segment_sz -
			options->headroom_sz -
			options->tailroom_sz,
		.segments_nb = segments_nb,
		.src_buf_offset = crypto_op_total_size_padded,
		.dst_buf_offset = 0
	};

	if (options->out_of_place) {
		*dst_buf_offset = *src_buf_offset +
				(mbuf_size * segments_nb);
		params.dst_buf_offset = *dst_buf_offset;
		/* Destination buffer will be one segment only */
		obj_size += max_size + sizeof(struct rte_mbuf);
	}

	*pool = rte_mempool_create_empty(pool_name,
			options->pool_sz, obj_size, 512, 0,
			rte_socket_id(), 0);
	if (*pool == NULL) {
		RTE_LOG(ERR, USER1,
			"Cannot allocate mempool for device %u\n",
			dev_id);
		return -1;
	}

	mp_ops_name = rte_mbuf_best_mempool_ops();

	ret = rte_mempool_set_ops_byname(*pool,
		mp_ops_name, NULL);
	if (ret != 0) {
		RTE_LOG(ERR, USER1,
			"Error setting mempool handler for device %u\n",
			dev_id);
		return -1;
	}

	ret = rte_mempool_populate_default(*pool);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			"Error populating mempool for device %u\n",
			dev_id);
		return -1;
	}

	rte_mempool_obj_iter(*pool, mempool_obj_init, (void *)&params);

	return 0;
}

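/*
 * Copy the test vector's plaintext or ciphertext into the mbuf chain,
 * one segment at a time, so the payload matches the requested operation
 * direction before the test runs.
 */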
void
cperf_mbuf_set(struct rte_mbuf *mbuf,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector)
{
	uint32_t segment_sz = options->segment_sz;
	uint8_t *mbuf_data;
	uint8_t *test_data;
	uint32_t remaining_bytes = options->max_buffer_size;

	if (options->op_type == CPERF_AEAD) {
		test_data = (options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
				test_vector->plaintext.data :
				test_vector->ciphertext.data;
	} else {
		test_data =
			(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				test_vector->plaintext.data :
				test_vector->ciphertext.data;
	}

	while (remaining_bytes) {
		mbuf_data = rte_pktmbuf_mtod(mbuf, uint8_t *);

		if (remaining_bytes <= segment_sz) {
			memcpy(mbuf_data, test_data, remaining_bytes);
			return;
		}

		memcpy(mbuf_data, test_data, segment_sz);
		remaining_bytes -= segment_sz;
		test_data += segment_sz;
		mbuf = mbuf->next;
	}
}