/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>

#include "rte_vhost_crypto.h"
#include "vhost.h"
#include "vhost_user.h"
#include "virtio_crypto.h"

#define INHDR_LEN (sizeof(struct virtio_crypto_inhdr))
#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
		sizeof(struct rte_crypto_sym_op))
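
/*
 * The IV of each request lives in the crypto op's private data area, which
 * starts immediately after the rte_crypto_op and rte_crypto_sym_op
 * structures; IV_OFFSET is therefore the sum of the two structure sizes.
 * The parsing code below fetches it with
 * rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET).
 */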

#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VC_LOG_ERR(fmt, args...) \
	RTE_LOG(ERR, USER1, "[%s] %s() line %u: " fmt "\n", \
		"Vhost-Crypto", __func__, __LINE__, ## args)
#define VC_LOG_INFO(fmt, args...) \
	RTE_LOG(INFO, USER1, "[%s] %s() line %u: " fmt "\n", \
		"Vhost-Crypto", __func__, __LINE__, ## args)

#define VC_LOG_DBG(fmt, args...) \
	RTE_LOG(DEBUG, USER1, "[%s] %s() line %u: " fmt "\n", \
		"Vhost-Crypto", __func__, __LINE__, ## args)
#else
#define VC_LOG_ERR(fmt, args...) \
	RTE_LOG(ERR, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
#define VC_LOG_INFO(fmt, args...) \
	RTE_LOG(INFO, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
#define VC_LOG_DBG(fmt, args...)
#endif

#define VIRTIO_CRYPTO_FEATURES ((1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \
		(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
		(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
		(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
		(1ULL << VIRTIO_F_VERSION_1) | \
		(1ULL << VHOST_USER_F_PROTOCOL_FEATURES))

#define IOVA_TO_VVA(t, r, a, l, p) \
	((t)(uintptr_t)vhost_iova_to_vva(r->dev, r->vq, a, l, p))

/*
 * vhost_crypto_desc is used to copy the original vring_desc entries to a
 * local buffer before processing (except the next index). The result is an
 * array of vhost_crypto_desc elements arranged in the order the original
 * vring_desc.next chain visits them.
 */
#define vhost_crypto_desc vring_desc
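
/*
 * Illustrative sketch (not part of the build): if the guest's indirect
 * table chains its entries as 0 -> 2 -> 1 via .next, the local copy is
 * laid out in chain order, so the parsing code can simply advance a
 * pointer instead of chasing .next:
 *
 *	guest table			local array
 *	[0] addr=A, next=2		[0] addr=A
 *	[1] addr=C  (tail)		[1] addr=B
 *	[2] addr=B, next=1		[2] addr=C
 */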

static int
cipher_algo_transform(uint32_t virtio_cipher_algo,
		enum rte_crypto_cipher_algorithm *algo)
{
	switch (virtio_cipher_algo) {
	case VIRTIO_CRYPTO_CIPHER_AES_CBC:
		*algo = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_CTR:
		*algo = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case VIRTIO_CRYPTO_CIPHER_DES_ECB:
		/* not supported: report NOTSUPP instead of storing the
		 * error code into *algo and returning success
		 */
		return -VIRTIO_CRYPTO_NOTSUPP;
	case VIRTIO_CRYPTO_CIPHER_DES_CBC:
		*algo = RTE_CRYPTO_CIPHER_DES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_ECB:
		*algo = RTE_CRYPTO_CIPHER_3DES_ECB;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_CBC:
		*algo = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_CTR:
		*algo = RTE_CRYPTO_CIPHER_3DES_CTR;
		break;
	case VIRTIO_CRYPTO_CIPHER_KASUMI_F8:
		*algo = RTE_CRYPTO_CIPHER_KASUMI_F8;
		break;
	case VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2:
		*algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_F8:
		*algo = RTE_CRYPTO_CIPHER_AES_F8;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_XTS:
		*algo = RTE_CRYPTO_CIPHER_AES_XTS;
		break;
	case VIRTIO_CRYPTO_CIPHER_ZUC_EEA3:
		*algo = RTE_CRYPTO_CIPHER_ZUC_EEA3;
		break;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	return 0;
}

static int
auth_algo_transform(uint32_t virtio_auth_algo,
		enum rte_crypto_auth_algorithm *algo)
{
	switch (virtio_auth_algo) {
	case VIRTIO_CRYPTO_NO_MAC:
		*algo = RTE_CRYPTO_AUTH_NULL;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_MD5:
		*algo = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA1:
		*algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_224:
		*algo = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_256:
		*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_384:
		*algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_512:
		*algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_CMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_CMAC;
		break;
	case VIRTIO_CRYPTO_MAC_KASUMI_F9:
		*algo = RTE_CRYPTO_AUTH_KASUMI_F9;
		break;
	case VIRTIO_CRYPTO_MAC_SNOW3G_UIA2:
		*algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
		break;
	case VIRTIO_CRYPTO_MAC_GMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_GMAC;
		break;
	case VIRTIO_CRYPTO_MAC_CBCMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_CBC_MAC;
		break;
	case VIRTIO_CRYPTO_MAC_XCBC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
		break;
	case VIRTIO_CRYPTO_MAC_CMAC_3DES:
	case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH:
	case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9:
		return -VIRTIO_CRYPTO_NOTSUPP;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	return 0;
}

static int get_iv_len(enum rte_crypto_cipher_algorithm algo)
{
	int len;

	switch (algo) {
	case RTE_CRYPTO_CIPHER_3DES_CBC:
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
		len = 8;
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		len = 16;
		break;

	/* TODO: add common algos */

	default:
		len = -1;
		break;
	}

	return len;
}

/**
 * vhost_crypto struct is used to maintain a number of virtio_cryptos and
 * one DPDK crypto device that deals with all crypto workloads.
 */
struct vhost_crypto {
	/** Used to look up a DPDK Cryptodev Session based on the VIRTIO
	 * crypto session ID.
	 */
	struct rte_hash *session_map;
	struct rte_mempool *mbuf_pool;
	struct rte_mempool *sess_pool;
	struct rte_mempool *sess_priv_pool;
	struct rte_mempool *wb_pool;

	/** DPDK cryptodev ID */
	uint8_t cid;
	uint16_t nb_qps;

	uint64_t last_session_id;

	uint64_t cache_session_id;
	struct rte_cryptodev_sym_session *cache_session;
	/** socket id for the device */
	int socket_id;

	struct virtio_net *dev;

	uint8_t option;
} __rte_cache_aligned;

struct vhost_crypto_writeback_data {
	uint8_t *src;
	uint8_t *dst;
	uint64_t len;
	struct vhost_crypto_writeback_data *next;
};

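/**
 * Per-request state kept in the private area of the source mbuf: the
 * guest descriptor chain being served, the write-back list used in
 * non-zero-copy mode, and the bookkeeping needed to update the used
 * ring on completion.
 */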
struct vhost_crypto_data_req {
	struct vring_desc *head;
	struct virtio_net *dev;
	struct virtio_crypto_inhdr *inhdr;
	struct vhost_virtqueue *vq;
	struct vhost_crypto_writeback_data *wb;
	struct rte_mempool *wb_pool;
	uint16_t desc_idx;
	uint16_t len;
	uint16_t zero_copy;
};

static int
transform_cipher_param(struct rte_crypto_sym_xform *xform,
		VhostUserCryptoSessionParam *param)
{
	int ret;

	ret = cipher_algo_transform(param->cipher_algo, &xform->cipher.algo);
	if (unlikely(ret < 0))
		return ret;

	if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) {
		VC_LOG_DBG("Invalid cipher key length");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform->cipher.key.length = param->cipher_key_len;
	if (xform->cipher.key.length > 0)
		xform->cipher.key.data = param->cipher_key_buf;
	if (param->dir == VIRTIO_CRYPTO_OP_ENCRYPT)
		xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	else if (param->dir == VIRTIO_CRYPTO_OP_DECRYPT)
		xform->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
	else {
		VC_LOG_DBG("Bad operation type");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	ret = get_iv_len(xform->cipher.algo);
	if (unlikely(ret < 0))
		return ret;
	xform->cipher.iv.length = (uint16_t)ret;
	xform->cipher.iv.offset = IV_OFFSET;
	return 0;
}

static int
transform_chain_param(struct rte_crypto_sym_xform *xforms,
		VhostUserCryptoSessionParam *param)
{
	struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
	int ret;

	switch (param->chaining_dir) {
	case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER:
		xform_auth = xforms;
		xform_cipher = xforms->next;
		xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
		xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
		break;
	case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH:
		xform_cipher = xforms;
		xform_auth = xforms->next;
		xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
		xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
		break;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	/* cipher */
	ret = cipher_algo_transform(param->cipher_algo,
			&xform_cipher->cipher.algo);
	if (unlikely(ret < 0))
		return ret;

	if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) {
		VC_LOG_DBG("Invalid cipher key length");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform_cipher->cipher.key.length = param->cipher_key_len;
	xform_cipher->cipher.key.data = param->cipher_key_buf;
	ret = get_iv_len(xform_cipher->cipher.algo);
	if (unlikely(ret < 0))
		return ret;
	xform_cipher->cipher.iv.length = (uint16_t)ret;
	xform_cipher->cipher.iv.offset = IV_OFFSET;

	/* auth */
	xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	ret = auth_algo_transform(param->hash_algo, &xform_auth->auth.algo);
	if (unlikely(ret < 0))
		return ret;

	if (param->auth_key_len > VHOST_USER_CRYPTO_MAX_HMAC_KEY_LENGTH) {
		VC_LOG_DBG("Invalid auth key length");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	xform_auth->auth.digest_length = param->digest_len;
	xform_auth->auth.key.length = param->auth_key_len;
	xform_auth->auth.key.data = param->auth_key_buf;

	return 0;
}

static void
vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
		VhostUserCryptoSessionParam *sess_param)
{
	struct rte_crypto_sym_xform xform1 = {0}, xform2 = {0};
	struct rte_cryptodev_sym_session *session;
	int ret;

	switch (sess_param->op_type) {
	case VIRTIO_CRYPTO_SYM_OP_NONE:
	case VIRTIO_CRYPTO_SYM_OP_CIPHER:
		ret = transform_cipher_param(&xform1, sess_param);
		if (unlikely(ret)) {
			VC_LOG_ERR("Error transform session msg (%i)", ret);
			sess_param->session_id = ret;
			return;
		}
		break;
	case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
		if (unlikely(sess_param->hash_mode !=
				VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)) {
			sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
			VC_LOG_ERR("Error transform session message (%i)",
					-VIRTIO_CRYPTO_NOTSUPP);
			return;
		}

		xform1.next = &xform2;

		ret = transform_chain_param(&xform1, sess_param);
		if (unlikely(ret)) {
			VC_LOG_ERR("Error transform session message (%i)", ret);
			sess_param->session_id = ret;
			return;
		}

		break;
	default:
		VC_LOG_ERR("Algorithm not yet supported");
		sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
		return;
	}

	session = rte_cryptodev_sym_session_create(vcrypto->sess_pool);
	if (!session) {
		VC_LOG_ERR("Failed to create session");
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	if (rte_cryptodev_sym_session_init(vcrypto->cid, session, &xform1,
			vcrypto->sess_priv_pool) < 0) {
		VC_LOG_ERR("Failed to initialize session");
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	/* insert hash to map */
	if (rte_hash_add_key_data(vcrypto->session_map,
			&vcrypto->last_session_id, session) < 0) {
		VC_LOG_ERR("Failed to insert session to hash table");

		if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0)
			VC_LOG_ERR("Failed to clear session");
		else {
			if (rte_cryptodev_sym_session_free(session) < 0)
				VC_LOG_ERR("Failed to free session");
		}
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	VC_LOG_INFO("Session %"PRIu64" created for vdev %i.",
			vcrypto->last_session_id, vcrypto->dev->vid);

	sess_param->session_id = vcrypto->last_session_id;
	vcrypto->last_session_id++;
}

static int
vhost_crypto_close_sess(struct vhost_crypto *vcrypto, uint64_t session_id)
{
	struct rte_cryptodev_sym_session *session;
	uint64_t sess_id = session_id;
	int ret;

	ret = rte_hash_lookup_data(vcrypto->session_map, &sess_id,
			(void **)&session);

	if (unlikely(ret < 0)) {
		VC_LOG_ERR("Failed to delete session %"PRIu64".", session_id);
		return -VIRTIO_CRYPTO_INVSESS;
	}

	if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0) {
		VC_LOG_DBG("Failed to clear session");
		return -VIRTIO_CRYPTO_ERR;
	}

	if (rte_cryptodev_sym_session_free(session) < 0) {
		VC_LOG_DBG("Failed to free session");
		return -VIRTIO_CRYPTO_ERR;
	}

	if (rte_hash_del_key(vcrypto->session_map, &sess_id) < 0) {
		VC_LOG_DBG("Failed to delete session from hash table.");
		return -VIRTIO_CRYPTO_ERR;
	}

	VC_LOG_INFO("Session %"PRIu64" deleted for vdev %i.", sess_id,
			vcrypto->dev->vid);

	return 0;
}

static enum rte_vhost_msg_result
vhost_crypto_msg_post_handler(int vid, void *msg)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;
	struct vhu_msg_context *ctx = msg;
	enum rte_vhost_msg_result ret = RTE_VHOST_MSG_RESULT_OK;

	if (dev == NULL) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	vcrypto = dev->extern_data;
	if (vcrypto == NULL) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	switch (ctx->msg.request.master) {
	case VHOST_USER_CRYPTO_CREATE_SESS:
		vhost_crypto_create_sess(vcrypto,
				&ctx->msg.payload.crypto_session);
		ctx->fd_num = 0;
		ret = RTE_VHOST_MSG_RESULT_REPLY;
		break;
	case VHOST_USER_CRYPTO_CLOSE_SESS:
		if (vhost_crypto_close_sess(vcrypto, ctx->msg.payload.u64))
			ret = RTE_VHOST_MSG_RESULT_ERR;
		break;
	default:
		ret = RTE_VHOST_MSG_RESULT_NOT_HANDLED;
		break;
	}

	return ret;
}

static __rte_always_inline struct vhost_crypto_desc *
find_write_desc(struct vhost_crypto_desc *head, struct vhost_crypto_desc *desc,
		uint32_t max_n_descs)
{
	if (desc < head)
		return NULL;

	while (desc - head < (int)max_n_descs) {
		if (desc->flags & VRING_DESC_F_WRITE)
			return desc;
		desc++;
	}

	return NULL;
}

static __rte_always_inline struct virtio_crypto_inhdr *
reach_inhdr(struct vhost_crypto_data_req *vc_req,
		struct vhost_crypto_desc *head,
		uint32_t max_n_descs)
{
	struct virtio_crypto_inhdr *inhdr;
	struct vhost_crypto_desc *last = head + (max_n_descs - 1);
	uint64_t dlen = last->len;

	if (unlikely(dlen != sizeof(*inhdr)))
		return NULL;

	inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, last->addr,
			&dlen, VHOST_ACCESS_WO);
	if (unlikely(!inhdr || dlen != last->len))
		return NULL;

	return inhdr;
}

static __rte_always_inline int
move_desc(struct vhost_crypto_desc *head,
		struct vhost_crypto_desc **cur_desc,
		uint32_t size, uint32_t max_n_descs)
{
	struct vhost_crypto_desc *desc = *cur_desc;
	int left = size - desc->len;

	while (desc->flags & VRING_DESC_F_NEXT && left > 0 &&
			desc >= head &&
			desc - head < (int)max_n_descs) {
		desc++;
		left -= desc->len;
	}

	if (unlikely(left > 0))
		return -1;

	/* compare desc - head (not head - desc), matching copy_data() */
	if (unlikely(desc - head == (int)max_n_descs))
		*cur_desc = NULL;
	else
		*cur_desc = desc + 1;

	return 0;
}

static __rte_always_inline void *
get_data_ptr(struct vhost_crypto_data_req *vc_req,
		struct vhost_crypto_desc *cur_desc,
		uint8_t perm)
{
	void *data;
	uint64_t dlen = cur_desc->len;

	data = IOVA_TO_VVA(void *, vc_req, cur_desc->addr, &dlen, perm);
	if (unlikely(!data || dlen != cur_desc->len)) {
		VC_LOG_ERR("Failed to map object");
		return NULL;
	}

	return data;
}

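/*
 * Copy 'size' bytes described by the descriptor chain into dst_data.
 * Note that vhost_iova_to_vva() may return a shorter mapped length than
 * requested when a guest buffer spans multiple host regions, which is why
 * each descriptor may need the inner remapping loop below.
 */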
static __rte_always_inline int
copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
		struct vhost_crypto_desc *head,
		struct vhost_crypto_desc **cur_desc,
		uint32_t size, uint32_t max_n_descs)
{
	struct vhost_crypto_desc *desc = *cur_desc;
	uint64_t remain, addr, dlen, len;
	uint32_t to_copy;
	uint8_t *data = dst_data;
	uint8_t *src;
	int left = size;

	to_copy = RTE_MIN(desc->len, (uint32_t)left);
	dlen = to_copy;
	src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
			VHOST_ACCESS_RO);
	if (unlikely(!src || !dlen))
		return -1;

	rte_memcpy(data, src, dlen);
	data += dlen;

	if (unlikely(dlen < to_copy)) {
		remain = to_copy - dlen;
		addr = desc->addr + dlen;

		while (remain) {
			len = remain;
			src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
					VHOST_ACCESS_RO);
			if (unlikely(!src || !len)) {
				VC_LOG_ERR("Failed to map descriptor");
				return -1;
			}

			rte_memcpy(data, src, len);
			addr += len;
			remain -= len;
			data += len;
		}
	}

	left -= to_copy;

	while (desc >= head && desc - head < (int)max_n_descs && left) {
		desc++;
		to_copy = RTE_MIN(desc->len, (uint32_t)left);
		dlen = to_copy;
		src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
				VHOST_ACCESS_RO);
		if (unlikely(!src || !dlen)) {
			VC_LOG_ERR("Failed to map descriptor");
			return -1;
		}

		rte_memcpy(data, src, dlen);
		data += dlen;

		if (unlikely(dlen < to_copy)) {
			remain = to_copy - dlen;
			addr = desc->addr + dlen;

			while (remain) {
				len = remain;
				src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
						VHOST_ACCESS_RO);
				if (unlikely(!src || !len)) {
					VC_LOG_ERR("Failed to map descriptor");
					return -1;
				}

				rte_memcpy(data, src, len);
				addr += len;
				remain -= len;
				data += len;
			}
		}

		left -= to_copy;
	}

	if (unlikely(left > 0)) {
		VC_LOG_ERR("Incorrect virtio descriptor");
		return -1;
	}

	if (unlikely(desc - head == (int)max_n_descs))
		*cur_desc = NULL;
	else
		*cur_desc = desc + 1;

	return 0;
}

static void
write_back_data(struct vhost_crypto_data_req *vc_req)
{
	struct vhost_crypto_writeback_data *wb_data = vc_req->wb, *wb_last;

	while (wb_data) {
		rte_memcpy(wb_data->dst, wb_data->src, wb_data->len);
		memset(wb_data->src, 0, wb_data->len);
		wb_last = wb_data;
		wb_data = wb_data->next;
		rte_mempool_put(vc_req->wb_pool, wb_last);
	}
}

static void
free_wb_data(struct vhost_crypto_writeback_data *wb_data,
		struct rte_mempool *mp)
{
	struct vhost_crypto_writeback_data *next;

	/* Walk the list iteratively: recursing on wb_data->next and then
	 * re-reading it after the element has been returned to the mempool
	 * would touch freed memory.
	 */
	while (wb_data != NULL) {
		next = wb_data->next;
		rte_mempool_put(mp, wb_data);
		wb_data = next;
	}
}

/**
 * The function allocates a vhost_crypto_writeback_data linked list that
 * records the source and destination data pointers for the write-back
 * operation performed after the op is dequeued from the Cryptodev PMD
 * queues.
 *
 * @param vc_req
 *   The vhost crypto data request pointer.
 * @param head_desc
 *   The pointer to the head of the local descriptor array.
 * @param cur_desc
 *   The pointer to the current in-use descriptor pointer. The content of
 *   cur_desc is expected to be updated after the function execution.
 * @param end_wb_data
 *   The last write-back data element to be returned. It is used only in
 *   cipher and hash chain operations.
 * @param src
 *   The source data pointer.
 * @param offset
 *   The offset into both source and destination data. For source data the
 *   offset is the number of bytes between src and the start point of the
 *   cipher operation. For destination data the offset is the number of
 *   bytes from *cur_desc->addr to the point where the src will be
 *   written to.
 * @param write_back_len
 *   The length of the data to write back.
 * @param max_n_descs
 *   The maximum number of descriptors in the local array.
 * @return
 *   The pointer to the start of the write-back data linked list.
 */
static __rte_always_inline struct vhost_crypto_writeback_data *
prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
		struct vhost_crypto_desc *head_desc,
		struct vhost_crypto_desc **cur_desc,
		struct vhost_crypto_writeback_data **end_wb_data,
		uint8_t *src,
		uint32_t offset,
		uint64_t write_back_len,
		uint32_t max_n_descs)
{
	/* initialize head so the error path never frees an uninitialized
	 * pointer when the first mempool get fails
	 */
	struct vhost_crypto_writeback_data *wb_data, *head = NULL;
	struct vhost_crypto_desc *desc = *cur_desc;
	uint64_t dlen;
	uint8_t *dst;
	int ret;

	ret = rte_mempool_get(vc_req->wb_pool, (void **)&head);
	if (unlikely(ret < 0)) {
		VC_LOG_ERR("no memory");
		goto error_exit;
	}

	wb_data = head;

	if (likely(desc->len > offset)) {
		wb_data->src = src + offset;
		dlen = desc->len;
		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr,
				&dlen, VHOST_ACCESS_RW);
		if (unlikely(!dst || dlen != desc->len)) {
			VC_LOG_ERR("Failed to map descriptor");
			goto error_exit;
		}

		wb_data->dst = dst + offset;
		wb_data->len = RTE_MIN(dlen - offset, write_back_len);
		write_back_len -= wb_data->len;
		src += offset + wb_data->len;
		offset = 0;

		if (unlikely(write_back_len)) {
			ret = rte_mempool_get(vc_req->wb_pool,
					(void **)&(wb_data->next));
			if (unlikely(ret < 0)) {
				VC_LOG_ERR("no memory");
				goto error_exit;
			}

			wb_data = wb_data->next;
		} else
			wb_data->next = NULL;
	} else
		offset -= desc->len;

	while (write_back_len &&
			desc >= head_desc &&
			desc - head_desc < (int)max_n_descs) {
		desc++;
		if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
			VC_LOG_ERR("incorrect descriptor");
			goto error_exit;
		}

		if (desc->len <= offset) {
			offset -= desc->len;
			continue;
		}

		dlen = desc->len;
		/* check the mapping before applying the offset, so the NULL
		 * test below actually catches a failed translation
		 */
		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
				VHOST_ACCESS_RW);
		if (unlikely(dst == NULL || dlen != desc->len)) {
			VC_LOG_ERR("Failed to map descriptor");
			goto error_exit;
		}

		wb_data->src = src + offset;
		wb_data->dst = dst + offset;
		wb_data->len = RTE_MIN(desc->len - offset, write_back_len);
		write_back_len -= wb_data->len;
		src += wb_data->len;
		offset = 0;

		if (write_back_len) {
			ret = rte_mempool_get(vc_req->wb_pool,
					(void **)&(wb_data->next));
			if (unlikely(ret < 0)) {
				VC_LOG_ERR("no memory");
				goto error_exit;
			}

			wb_data = wb_data->next;
		} else
			wb_data->next = NULL;
	}

	if (unlikely(desc - head_desc == (int)max_n_descs))
		*cur_desc = NULL;
	else
		*cur_desc = desc + 1;

	*end_wb_data = wb_data;

	return head;

error_exit:
	if (head)
		free_wb_data(head, vc_req->wb_pool);

	return NULL;
}
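
/*
 * Usage sketch (illustrative only, mirroring the non-zero-copy paths
 * below): build the write-back list while parsing the request, then
 * replay it once the op comes back from the cryptodev:
 *
 *	struct vhost_crypto_writeback_data *ewb;
 *
 *	vc_req->wb = prepare_write_back_data(vc_req, head, &desc, &ewb,
 *			rte_pktmbuf_mtod(m_src, uint8_t *), 0,
 *			cipher->para.dst_data_len, max_n_descs);
 *	...
 *	write_back_data(vc_req);  /- after rte_cryptodev_dequeue_burst() -/
 */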

static __rte_always_inline uint8_t
vhost_crypto_check_cipher_request(struct virtio_crypto_cipher_data_req *req)
{
	if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) &&
		(req->para.src_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE) &&
		(req->para.dst_data_len >= req->para.src_data_len) &&
		(req->para.dst_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE)))
		return VIRTIO_CRYPTO_OK;
	return VIRTIO_CRYPTO_BADMSG;
}

static __rte_always_inline uint8_t
prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
		struct vhost_crypto_data_req *vc_req,
		struct virtio_crypto_cipher_data_req *cipher,
		struct vhost_crypto_desc *head,
		uint32_t max_n_descs)
{
	struct vhost_crypto_desc *desc = head;
	struct vhost_crypto_writeback_data *ewb = NULL;
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
	uint8_t ret = vhost_crypto_check_cipher_request(cipher);

	if (unlikely(ret != VIRTIO_CRYPTO_OK))
		goto error_exit;

	/* prepare */
	/* iv */
	if (unlikely(copy_data(iv_data, vc_req, head, &desc,
			cipher->para.iv_len, max_n_descs))) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_src->data_len = cipher->para.src_data_len;
		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
				cipher->para.src_data_len);
		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
		if (unlikely(m_src->buf_iova == 0 ||
				m_src->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(head, &desc, cipher->para.src_data_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb_pool = vcrypto->wb_pool;
		m_src->data_len = cipher->para.src_data_len;
		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
				vc_req, head, &desc, cipher->para.src_data_len,
				max_n_descs) < 0)) {
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}
		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* dst */
	desc = find_write_desc(head, desc, max_n_descs);
	if (unlikely(!desc)) {
		VC_LOG_ERR("Cannot find write location");
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
				desc->addr, cipher->para.dst_data_len);
		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
		if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(head, &desc, cipher->para.dst_data_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		m_dst->data_len = cipher->para.dst_data_len;
		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb = prepare_write_back_data(vc_req, head, &desc, &ewb,
				rte_pktmbuf_mtod(m_src, uint8_t *), 0,
				cipher->para.dst_data_len, max_n_descs);
		if (unlikely(vc_req->wb == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* src data */
	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;

	op->sym->cipher.data.offset = 0;
	op->sym->cipher.data.length = cipher->para.src_data_len;

	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
	if (unlikely(vc_req->inhdr == NULL)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
	vc_req->len = cipher->para.dst_data_len + INHDR_LEN;

	return 0;

error_exit:
	if (vc_req->wb)
		free_wb_data(vc_req->wb, vc_req->wb_pool);

	vc_req->len = INHDR_LEN;
	return ret;
}

static __rte_always_inline uint8_t
vhost_crypto_check_chain_request(struct virtio_crypto_alg_chain_data_req *req)
{
	if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) &&
		(req->para.src_data_len <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.dst_data_len >= req->para.src_data_len) &&
		(req->para.dst_data_len <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.cipher_start_src_offset <
			VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.len_to_cipher <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.hash_start_src_offset <
			VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.len_to_hash <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.cipher_start_src_offset + req->para.len_to_cipher <=
			req->para.src_data_len) &&
		(req->para.hash_start_src_offset + req->para.len_to_hash <=
			req->para.src_data_len) &&
		(req->para.dst_data_len + req->para.hash_result_len <=
			VHOST_CRYPTO_MAX_DATA_SIZE)))
		return VIRTIO_CRYPTO_OK;
	return VIRTIO_CRYPTO_BADMSG;
}

static __rte_always_inline uint8_t
prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
		struct vhost_crypto_data_req *vc_req,
		struct virtio_crypto_alg_chain_data_req *chain,
		struct vhost_crypto_desc *head,
		uint32_t max_n_descs)
{
	struct vhost_crypto_desc *desc = head, *digest_desc;
	struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
	uint32_t digest_offset;
	void *digest_addr;
	uint8_t ret = vhost_crypto_check_chain_request(chain);

	if (unlikely(ret != VIRTIO_CRYPTO_OK))
		goto error_exit;

	/* prepare */
	/* iv */
	if (unlikely(copy_data(iv_data, vc_req, head, &desc,
			chain->para.iv_len, max_n_descs) < 0)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_src->data_len = chain->para.src_data_len;
		m_dst->data_len = chain->para.dst_data_len;

		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
				chain->para.src_data_len);
		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
		if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(head, &desc, chain->para.src_data_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}
		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb_pool = vcrypto->wb_pool;
		m_src->data_len = chain->para.src_data_len;
		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
				vc_req, head, &desc, chain->para.src_data_len,
				max_n_descs) < 0)) {
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}

		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* dst */
	desc = find_write_desc(head, desc, max_n_descs);
	if (unlikely(!desc)) {
		VC_LOG_ERR("Cannot find write location");
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
				desc->addr, chain->para.dst_data_len);
		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
		if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		/* walk the local descriptor array, consistent with the
		 * sibling move_desc() calls in this function
		 */
		if (unlikely(move_desc(head, &desc,
				chain->para.dst_data_len, max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		op->sym->auth.digest.phys_addr = gpa_to_hpa(vcrypto->dev,
				desc->addr, chain->para.hash_result_len);
		op->sym->auth.digest.data = get_data_ptr(vc_req, desc,
				VHOST_ACCESS_RW);
		if (unlikely(op->sym->auth.digest.phys_addr == 0)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(head, &desc,
				chain->para.hash_result_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb = prepare_write_back_data(vc_req, head, &desc, &ewb,
				rte_pktmbuf_mtod(m_src, uint8_t *),
				chain->para.cipher_start_src_offset,
				chain->para.dst_data_len -
				chain->para.cipher_start_src_offset,
				max_n_descs);
		if (unlikely(vc_req->wb == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		digest_desc = desc;
		digest_offset = m_src->data_len;
		digest_addr = rte_pktmbuf_mtod_offset(m_src, void *,
				digest_offset);

		/** create a wb_data for digest */
		ewb->next = prepare_write_back_data(vc_req, head, &desc,
				&ewb2, digest_addr, 0,
				chain->para.hash_result_len, max_n_descs);
		if (unlikely(ewb->next == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(copy_data(digest_addr, vc_req, head, &digest_desc,
				chain->para.hash_result_len,
				max_n_descs) < 0)) {
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}

		op->sym->auth.digest.data = digest_addr;
		op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_src,
				digest_offset);
		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* record inhdr */
	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
	if (unlikely(vc_req->inhdr == NULL)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;

	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;

	op->sym->cipher.data.offset = chain->para.cipher_start_src_offset;
	op->sym->cipher.data.length = chain->para.src_data_len -
			chain->para.cipher_start_src_offset;

	op->sym->auth.data.offset = chain->para.hash_start_src_offset;
	op->sym->auth.data.length = chain->para.len_to_hash;

	vc_req->len = chain->para.dst_data_len + chain->para.hash_result_len +
			INHDR_LEN;
	return 0;

error_exit:
	if (vc_req->wb)
		free_wb_data(vc_req->wb, vc_req->wb_pool);
	vc_req->len = INHDR_LEN;
	return ret;
}

/**
 * Process one request from an indirect descriptor chain.
 */
static __rte_always_inline int
vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
		struct vhost_virtqueue *vq, struct rte_crypto_op *op,
		struct vring_desc *head, struct vhost_crypto_desc *descs,
		uint16_t desc_idx)
{
	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src);
	struct rte_cryptodev_sym_session *session;
	struct virtio_crypto_op_data_req req;
	struct virtio_crypto_inhdr *inhdr;
	struct vhost_crypto_desc *desc = descs;
	struct vring_desc *src_desc;
	uint64_t session_id;
	uint64_t dlen;
	uint32_t nb_descs = 0, max_n_descs, i;
	int err;

	vc_req->desc_idx = desc_idx;
	vc_req->dev = vcrypto->dev;
	vc_req->vq = vq;

	if (unlikely((head->flags & VRING_DESC_F_INDIRECT) == 0)) {
		VC_LOG_ERR("Invalid descriptor");
		return -1;
	}

	dlen = head->len;
	src_desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
			&dlen, VHOST_ACCESS_RO);
	if (unlikely(!src_desc || dlen != head->len)) {
		VC_LOG_ERR("Invalid descriptor");
		return -1;
	}
	head = src_desc;

	nb_descs = max_n_descs = dlen / sizeof(struct vring_desc);
	if (unlikely(nb_descs > VHOST_CRYPTO_MAX_N_DESC || nb_descs == 0)) {
		VC_LOG_ERR("Cannot process num of descriptors %u", nb_descs);
		if (nb_descs > 0) {
			struct vring_desc *inhdr_desc = head;

			while (inhdr_desc->flags & VRING_DESC_F_NEXT) {
				if (inhdr_desc->next >= max_n_descs)
					return -1;
				inhdr_desc = &head[inhdr_desc->next];
			}
			if (inhdr_desc->len != sizeof(*inhdr))
				return -1;
			/* remap using the in-header's own length */
			dlen = inhdr_desc->len;
			inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *,
					vc_req, inhdr_desc->addr, &dlen,
					VHOST_ACCESS_WO);
			if (unlikely(!inhdr || dlen != inhdr_desc->len))
				return -1;
			inhdr->status = VIRTIO_CRYPTO_ERR;
		}
		/* also fail the nb_descs == 0 case instead of falling
		 * through with an empty local descriptor array
		 */
		return -1;
	}

	/* copy descriptors to local variable */
	for (i = 0; i < max_n_descs; i++) {
		desc->addr = src_desc->addr;
		desc->len = src_desc->len;
		desc->flags = src_desc->flags;
		desc++;
		if (unlikely((src_desc->flags & VRING_DESC_F_NEXT) == 0))
			break;
		if (unlikely(src_desc->next >= max_n_descs)) {
			err = VIRTIO_CRYPTO_BADMSG;
			VC_LOG_ERR("Invalid descriptor");
			goto error_exit;
		}
		src_desc = &head[src_desc->next];
	}

	vc_req->head = head;
	vc_req->zero_copy = vcrypto->option;

	nb_descs = desc - descs;
	desc = descs;

	if (unlikely(desc->len < sizeof(req))) {
		err = VIRTIO_CRYPTO_BADMSG;
		VC_LOG_ERR("Invalid descriptor");
		goto error_exit;
	}

	if (unlikely(copy_data(&req, vc_req, descs, &desc, sizeof(req),
			max_n_descs) < 0)) {
		err = VIRTIO_CRYPTO_BADMSG;
		VC_LOG_ERR("Invalid descriptor");
		goto error_exit;
	}

	/* desc is advanced by 1 now */
	max_n_descs -= 1;

	switch (req.header.opcode) {
	case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
	case VIRTIO_CRYPTO_CIPHER_DECRYPT:
		session_id = req.header.session_id;

		/* one branch to avoid unnecessary table lookup */
		if (vcrypto->cache_session_id != session_id) {
			err = rte_hash_lookup_data(vcrypto->session_map,
					&session_id, (void **)&session);
			if (unlikely(err < 0)) {
				err = VIRTIO_CRYPTO_ERR;
				VC_LOG_ERR("Failed to find session %"PRIu64,
						session_id);
				goto error_exit;
			}

			vcrypto->cache_session = session;
			vcrypto->cache_session_id = session_id;
		}

		session = vcrypto->cache_session;

		err = rte_crypto_op_attach_sym_session(op, session);
		if (unlikely(err < 0)) {
			err = VIRTIO_CRYPTO_ERR;
			VC_LOG_ERR("Failed to attach session to op");
			goto error_exit;
		}

		switch (req.u.sym_req.op_type) {
		case VIRTIO_CRYPTO_SYM_OP_NONE:
			err = VIRTIO_CRYPTO_NOTSUPP;
			break;
		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
			err = prepare_sym_cipher_op(vcrypto, op, vc_req,
					&req.u.sym_req.u.cipher, desc,
					max_n_descs);
			break;
		case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
			err = prepare_sym_chain_op(vcrypto, op, vc_req,
					&req.u.sym_req.u.chain, desc,
					max_n_descs);
			break;
		}
		if (unlikely(err != 0)) {
			VC_LOG_ERR("Failed to process sym request");
			goto error_exit;
		}
		break;
	default:
		err = VIRTIO_CRYPTO_ERR;
		VC_LOG_ERR("Unsupported symmetric crypto request type %u",
				req.header.opcode);
		goto error_exit;
	}

	return 0;

error_exit:

	inhdr = reach_inhdr(vc_req, descs, max_n_descs);
	if (likely(inhdr != NULL))
		inhdr->status = (uint8_t)err;

	return -1;
}

static __rte_always_inline struct vhost_virtqueue *
vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
		struct vhost_virtqueue *old_vq)
{
	struct rte_mbuf *m_src = op->sym->m_src;
	struct rte_mbuf *m_dst = op->sym->m_dst;
	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src);
	struct vhost_virtqueue *vq;
	uint16_t used_idx, desc_idx;

	if (unlikely(!vc_req)) {
		VC_LOG_ERR("Failed to retrieve vc_req");
		return NULL;
	}
	vq = vc_req->vq;
	used_idx = vc_req->desc_idx;

	if (old_vq && (vq != old_vq))
		return vq;

	if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
		vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
	else {
		if (vc_req->zero_copy == 0)
			write_back_data(vc_req);
	}

	desc_idx = vq->avail->ring[used_idx];
	vq->used->ring[desc_idx].id = vq->avail->ring[desc_idx];
	vq->used->ring[desc_idx].len = vc_req->len;

	rte_mempool_put(m_src->pool, (void *)m_src);

	if (m_dst)
		rte_mempool_put(m_dst->pool, (void *)m_dst);

	return vc_req->vq;
}

static __rte_always_inline uint16_t
vhost_crypto_complete_one_vm_requests(struct rte_crypto_op **ops,
		uint16_t nb_ops, int *callfd)
{
	uint16_t processed = 1;
	struct vhost_virtqueue *vq, *tmp_vq;

	if (unlikely(nb_ops == 0))
		return 0;

	vq = vhost_crypto_finalize_one_request(ops[0], NULL);
	if (unlikely(vq == NULL))
		return 0;
	tmp_vq = vq;

	while (processed < nb_ops) {
		tmp_vq = vhost_crypto_finalize_one_request(ops[processed],
				tmp_vq);

		if (unlikely(vq != tmp_vq))
			break;

		processed++;
	}

	*callfd = vq->callfd;

	*(volatile uint16_t *)&vq->used->idx += processed;

	return processed;
}

int
rte_vhost_crypto_driver_start(const char *path)
{
	uint64_t protocol_features;
	int ret;

	ret = rte_vhost_driver_set_features(path, VIRTIO_CRYPTO_FEATURES);
	if (ret)
		return -1;

	ret = rte_vhost_driver_get_protocol_features(path, &protocol_features);
	if (ret)
		return -1;
	protocol_features |= (1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
	ret = rte_vhost_driver_set_protocol_features(path, protocol_features);
	if (ret)
		return -1;

	return rte_vhost_driver_start(path);
}
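
/*
 * Setup sketch (illustrative; the socket path and the device-ops variable
 * are assumptions of the caller, not part of this file):
 *
 *	const char *path = "/tmp/vhost_crypto.sock";
 *
 *	rte_vhost_driver_register(path, 0);
 *	rte_vhost_driver_callback_register(path, &vhost_crypto_device_ops);
 *	rte_vhost_crypto_driver_start(path);
 *
 * rte_vhost_crypto_create() below is then typically called from the
 * device ops' new_device() callback once a guest connects.
 */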

int
rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
		struct rte_mempool *sess_pool,
		struct rte_mempool *sess_priv_pool,
		int socket_id)
{
	struct virtio_net *dev = get_device(vid);
	struct rte_hash_parameters params = {0};
	struct vhost_crypto *vcrypto;
	char name[128];
	int ret;

	if (!dev) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	vcrypto = rte_zmalloc_socket(NULL, sizeof(*vcrypto),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (!vcrypto) {
		VC_LOG_ERR("Insufficient memory");
		return -ENOMEM;
	}

	vcrypto->sess_pool = sess_pool;
	vcrypto->sess_priv_pool = sess_priv_pool;
	vcrypto->cid = cryptodev_id;
	vcrypto->cache_session_id = UINT64_MAX;
	vcrypto->last_session_id = 1;
	vcrypto->dev = dev;
	vcrypto->option = RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE;

	snprintf(name, 127, "HASH_VHOST_CRYPT_%u", (uint32_t)vid);
	params.name = name;
	params.entries = VHOST_CRYPTO_SESSION_MAP_ENTRIES;
	params.hash_func = rte_jhash;
	params.key_len = sizeof(uint64_t);
	params.socket_id = socket_id;
	vcrypto->session_map = rte_hash_create(&params);
	if (!vcrypto->session_map) {
		VC_LOG_ERR("Failed to create session map");
		ret = -ENOMEM;
		goto error_exit;
	}

	snprintf(name, 127, "MBUF_POOL_VM_%u", (uint32_t)vid);
	vcrypto->mbuf_pool = rte_pktmbuf_pool_create(name,
			VHOST_CRYPTO_MBUF_POOL_SIZE, 512,
			sizeof(struct vhost_crypto_data_req),
			VHOST_CRYPTO_MAX_DATA_SIZE + RTE_PKTMBUF_HEADROOM,
			rte_socket_id());
	if (!vcrypto->mbuf_pool) {
		VC_LOG_ERR("Failed to create mbuf pool");
		ret = -ENOMEM;
		goto error_exit;
	}

	snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
	vcrypto->wb_pool = rte_mempool_create(name,
			VHOST_CRYPTO_MBUF_POOL_SIZE,
			sizeof(struct vhost_crypto_writeback_data),
			128, 0, NULL, NULL, NULL, NULL,
			rte_socket_id(), 0);
	if (!vcrypto->wb_pool) {
		VC_LOG_ERR("Failed to create mempool");
		ret = -ENOMEM;
		goto error_exit;
	}

	dev->extern_data = vcrypto;
	dev->extern_ops.pre_msg_handle = NULL;
	dev->extern_ops.post_msg_handle = vhost_crypto_msg_post_handler;

	return 0;

error_exit:
	rte_hash_free(vcrypto->session_map);
	rte_mempool_free(vcrypto->mbuf_pool);

	rte_free(vcrypto);

	return ret;
}

int
rte_vhost_crypto_free(int vid)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	vcrypto = dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return -ENOENT;
	}

	rte_hash_free(vcrypto->session_map);
	rte_mempool_free(vcrypto->mbuf_pool);
	rte_mempool_free(vcrypto->wb_pool);
	rte_free(vcrypto);

	dev->extern_data = NULL;
	dev->extern_ops.pre_msg_handle = NULL;
	dev->extern_ops.post_msg_handle = NULL;

	return 0;
}

int
rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	if (unlikely((uint32_t)option >=
				RTE_VHOST_CRYPTO_MAX_ZERO_COPY_OPTIONS)) {
		VC_LOG_ERR("Invalid option %i", option);
		return -EINVAL;
	}

	vcrypto = (struct vhost_crypto *)dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return -ENOENT;
	}

	if (vcrypto->option == (uint8_t)option)
		return 0;

	if (!(rte_mempool_full(vcrypto->mbuf_pool)) ||
			!(rte_mempool_full(vcrypto->wb_pool))) {
		VC_LOG_ERR("Cannot update zero copy as mempool is not full");
		return -EINVAL;
	}

	if (option == RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE) {
		char name[128];

		snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
		vcrypto->wb_pool = rte_mempool_create(name,
				VHOST_CRYPTO_MBUF_POOL_SIZE,
				sizeof(struct vhost_crypto_writeback_data),
				128, 0, NULL, NULL, NULL, NULL,
				rte_socket_id(), 0);
		if (!vcrypto->wb_pool) {
			VC_LOG_ERR("Failed to create mempool");
			return -ENOMEM;
		}
	} else {
		rte_mempool_free(vcrypto->wb_pool);
		vcrypto->wb_pool = NULL;
	}

	vcrypto->option = (uint8_t)option;

	return 0;
}

uint16_t
rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_mbuf *mbufs[VHOST_CRYPTO_MAX_BURST_SIZE * 2];
	struct vhost_crypto_desc descs[VHOST_CRYPTO_MAX_N_DESC];
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;
	struct vhost_virtqueue *vq;
	uint16_t avail_idx;
	uint16_t start_idx;
	uint16_t count;
	uint16_t i = 0;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return 0;
	}

	if (unlikely(qid >= VHOST_MAX_QUEUE_PAIRS)) {
		VC_LOG_ERR("Invalid qid %u", qid);
		return 0;
	}

	vcrypto = (struct vhost_crypto *)dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return 0;
	}

	vq = dev->virtqueue[qid];

	avail_idx = *((volatile uint16_t *)&vq->avail->idx);
	start_idx = vq->last_used_idx;
	count = avail_idx - start_idx;
	count = RTE_MIN(count, VHOST_CRYPTO_MAX_BURST_SIZE);
	count = RTE_MIN(count, nb_ops);

	if (unlikely(count == 0))
		return 0;

	/* for zero copy, we need 2 empty mbufs for src and dst, otherwise
	 * we need only 1 mbuf as src and dst
	 */
	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
				(void **)mbufs, count * 2) < 0)) {
			VC_LOG_ERR("Insufficient memory");
			return 0;
		}

		for (i = 0; i < count; i++) {
			uint16_t used_idx = (start_idx + i) & (vq->size - 1);
			uint16_t desc_idx = vq->avail->ring[used_idx];
			struct vring_desc *head = &vq->desc[desc_idx];
			struct rte_crypto_op *op = ops[i];

			op->sym->m_src = mbufs[i * 2];
			op->sym->m_dst = mbufs[i * 2 + 1];
			op->sym->m_src->data_off = 0;
			op->sym->m_dst->data_off = 0;

			if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
					op, head, descs, used_idx) < 0))
				break;
		}

		if (unlikely(i < count))
			rte_mempool_put_bulk(vcrypto->mbuf_pool,
					(void **)&mbufs[i * 2],
					(count - i) * 2);

		break;

	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
				(void **)mbufs, count) < 0)) {
			VC_LOG_ERR("Insufficient memory");
			return 0;
		}

		for (i = 0; i < count; i++) {
			uint16_t used_idx = (start_idx + i) & (vq->size - 1);
			uint16_t desc_idx = vq->avail->ring[used_idx];
			struct vring_desc *head = &vq->desc[desc_idx];
			struct rte_crypto_op *op = ops[i];

			op->sym->m_src = mbufs[i];
			op->sym->m_dst = NULL;
			op->sym->m_src->data_off = 0;

			/* pass the avail ring slot (used_idx), matching the
			 * zero-copy path above; the completion path indexes
			 * the avail ring with vc_req->desc_idx
			 */
			if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
					op, head, descs, used_idx) < 0))
				break;
		}

		if (unlikely(i < count))
			rte_mempool_put_bulk(vcrypto->mbuf_pool,
					(void **)&mbufs[i],
					count - i);

		break;
	}

	vq->last_used_idx += i;

	return i;
}

uint16_t
rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
		uint16_t nb_ops, int *callfds, uint16_t *nb_callfds)
{
	struct rte_crypto_op **tmp_ops = ops;
	uint16_t count = 0, left = nb_ops;
	int callfd;
	uint16_t idx = 0;

	while (left) {
		count = vhost_crypto_complete_one_vm_requests(tmp_ops, left,
				&callfd);
		if (unlikely(count == 0))
			break;

		tmp_ops = &tmp_ops[count];
		left -= count;

		callfds[idx++] = callfd;

		if (unlikely(idx >= VIRTIO_CRYPTO_MAX_NUM_BURST_VQS)) {
			VC_LOG_ERR("Too many vqs");
			break;
		}
	}

	*nb_callfds = idx;

	return nb_ops - left;
}

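/*
 * Data-path sketch (illustrative only; `cid`, `qp`, `burst`, and the
 * eventfd signalling are assumptions about the caller, not part of this
 * file): requests are fetched from the guest, pushed through the
 * cryptodev, finalized, and the guest is then notified per virtqueue:
 *
 *	uint16_t n = rte_vhost_crypto_fetch_requests(vid, qid, ops, burst);
 *	n = rte_cryptodev_enqueue_burst(cid, qp, ops, n);
 *	...
 *	n = rte_cryptodev_dequeue_burst(cid, qp, ops_deq, burst);
 *	n = rte_vhost_crypto_finalize_requests(ops_deq, n, callfds, &nb_fds);
 *	for (i = 0; i < nb_fds; i++)
 *		eventfd_write(callfds[i], (eventfd_t)1);
 */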