/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 * All rights reserved.
 */

#include <string.h>
#include <stdbool.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_service_component.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
#include "rte_eventdev_trace.h"
#include "rte_event_crypto_adapter.h"

#define BATCH_SIZE 32
#define DEFAULT_MAX_NB 128
#define CRYPTO_ADAPTER_NAME_LEN 32
#define CRYPTO_ADAPTER_MEM_NAME_LEN 32
#define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100

/* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
 * iterations of eca_crypto_adapter_enq_run()
 */
#define CRYPTO_ENQ_FLUSH_THRESHOLD 1024

struct rte_event_crypto_adapter {
	/* Event device identifier */
	uint8_t eventdev_id;
	/* Event port identifier */
	uint8_t event_port_id;
	/* Store event device's implicit release capability */
	uint8_t implicit_release_disabled;
	/* Max crypto ops processed in any service function invocation */
	uint32_t max_nb;
	/* Lock to serialize config updates with service function */
	rte_spinlock_t lock;
	/* Next crypto device to be processed */
	uint16_t next_cdev_id;
	/* Per crypto device structure */
	struct crypto_device_info *cdevs;
	/* Loop counter to flush crypto ops */
	uint16_t transmit_loop_count;
	/* Per instance stats structure */
	struct rte_event_crypto_adapter_stats crypto_stats;
	/* Configuration callback for rte_service configuration */
	rte_event_crypto_adapter_conf_cb conf_cb;
	/* Configuration callback argument */
	void *conf_arg;
	/* Set if default_cb is being used */
	int default_cb_arg;
	/* Service initialization state */
	uint8_t service_inited;
	/* Memory allocation name */
	char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
	/* Socket identifier cached from eventdev */
	int socket_id;
	/* Per adapter EAL service */
	uint32_t service_id;
	/* No. of queue pairs configured */
	uint16_t nb_qps;
	/* Adapter mode */
	enum rte_event_crypto_adapter_mode mode;
} __rte_cache_aligned;

/* Per crypto device information */
struct crypto_device_info {
	/* Pointer to cryptodev */
	struct rte_cryptodev *dev;
	/* Pointer to queue pair info */
	struct crypto_queue_pair_info *qpairs;
	/* Next queue pair to be processed */
	uint16_t next_queue_pair_id;
	/* Set to indicate cryptodev->eventdev packet
	 * transfer uses a hardware mechanism
	 */
	uint8_t internal_event_port;
	/* Set to indicate processing has been started */
	uint8_t dev_started;
	/* If num_qpairs > 0, the start callback will
	 * be invoked if not already invoked
	 */
	uint16_t num_qpairs;
} __rte_cache_aligned;

/* Per queue pair information */
struct crypto_queue_pair_info {
	/* Set to indicate queue pair is enabled */
	bool qp_enabled;
	/* Pointer to hold rte_crypto_ops for batching */
	struct rte_crypto_op **op_buffer;
	/* No of crypto ops accumulated */
	uint8_t len;
} __rte_cache_aligned;

static struct rte_event_crypto_adapter **event_crypto_adapter;

/* Macros to check for valid adapter */
#define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
	if (!eca_valid_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d\n", id); \
		return retval; \
	} \
} while (0)

static inline int
eca_valid_id(uint8_t id)
{
	return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
}

static int
eca_init(void)
{
	const char *name = "crypto_adapter_array";
	const struct rte_memzone *mz;
	unsigned int sz;

	sz = sizeof(*event_crypto_adapter) *
	     RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);

	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
						 RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
					 PRId32, rte_errno);
			return -rte_errno;
		}
	}

	event_crypto_adapter = mz->addr;
	return 0;
}

static inline struct rte_event_crypto_adapter *
eca_id_to_adapter(uint8_t id)
{
	return event_crypto_adapter ?
		event_crypto_adapter[id] : NULL;
}

static int
eca_default_config_cb(uint8_t id, uint8_t dev_id,
		      struct rte_event_crypto_adapter_conf *conf, void *arg)
{
	struct rte_event_dev_config dev_conf;
	struct rte_eventdev *dev;
	uint8_t port_id;
	int started;
	int ret;
	struct rte_event_port_conf *port_conf = arg;
	struct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);

	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);
	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports += 1;
	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", dev_id);
		if (started) {
			if (rte_event_dev_start(dev_id))
				return -EIO;
		}
		return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to setup event port %u\n", port_id);
		return ret;
	}

	conf->event_port_id = port_id;
	conf->max_nb = DEFAULT_MAX_NB;
	if (started)
		ret = rte_event_dev_start(dev_id);

	adapter->default_cb_arg = 1;
	return ret;
}

int
rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
				rte_event_crypto_adapter_conf_cb conf_cb,
				enum rte_event_crypto_adapter_mode mode,
				void *conf_arg)
{
	struct rte_event_crypto_adapter *adapter;
	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
	struct rte_event_dev_info dev_info;
	int socket_id;
	uint8_t i;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	if (conf_cb == NULL)
		return -EINVAL;

	if (event_crypto_adapter == NULL) {
		ret = eca_init();
		if (ret)
			return ret;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter != NULL) {
		RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
		return -EEXIST;
	}

	socket_id = rte_event_dev_socket_id(dev_id);
	snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,
		 "rte_event_crypto_adapter_%d", id);

	adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
				     RTE_CACHE_LINE_SIZE, socket_id);
	if (adapter == NULL) {
		RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
		return -ENOMEM;
	}

	ret = rte_event_dev_info_get(dev_id, &dev_info);
	if (ret < 0) {
		RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!",
				 dev_id, dev_info.driver_name);
		rte_free(adapter);
		return ret;
	}

	adapter->implicit_release_disabled = (dev_info.event_dev_cap &
			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
	adapter->eventdev_id = dev_id;
	adapter->socket_id = socket_id;
	adapter->conf_cb = conf_cb;
	adapter->conf_arg = conf_arg;
	adapter->mode = mode;
	strcpy(adapter->mem_name, mem_name);
	adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
					rte_cryptodev_count() *
					sizeof(struct crypto_device_info), 0,
					socket_id);
	if (adapter->cdevs == NULL) {
		RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices\n");
		rte_free(adapter);
		return -ENOMEM;
	}

	rte_spinlock_init(&adapter->lock);
	for (i = 0; i < rte_cryptodev_count(); i++)
		adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);

	event_crypto_adapter[id] = adapter;

	rte_eventdev_trace_crypto_adapter_create(id, dev_id, adapter, conf_arg,
		mode);
	return 0;
}

int
rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
				struct rte_event_port_conf *port_config,
				enum rte_event_crypto_adapter_mode mode)
{
	struct rte_event_port_conf *pc;
	int ret;

	if (port_config == NULL)
		return -EINVAL;
	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	pc = rte_malloc(NULL, sizeof(*pc), 0);
	if (pc == NULL)
		return -ENOMEM;
	*pc = *port_config;
	ret = rte_event_crypto_adapter_create_ext(id, dev_id,
						  eca_default_config_cb,
						  mode,
						  pc);
	if (ret)
		rte_free(pc);

	return ret;
}
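
/*
 * Illustrative usage sketch (not part of this library's build; identifiers
 * such as ADAPTER_ID, EVDEV_ID and the port configuration values are
 * assumptions): a typical application creates the adapter from an already
 * configured event device and lets the default callback above add the extra
 * event port the adapter needs.
 *
 *	struct rte_event_port_conf port_conf = {
 *		.new_event_threshold = 4096,
 *		.dequeue_depth = 32,
 *		.enqueue_depth = 32,
 *	};
 *	int ret;
 *
 *	ret = rte_event_crypto_adapter_create(ADAPTER_ID, EVDEV_ID,
 *					&port_conf,
 *					RTE_EVENT_CRYPTO_ADAPTER_OP_NEW);
 *	if (ret < 0)
 *		rte_exit(EXIT_FAILURE, "adapter create failed\n");
 */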

int
rte_event_crypto_adapter_free(uint8_t id)
{
	struct rte_event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	if (adapter->nb_qps) {
		RTE_EDEV_LOG_ERR("%" PRIu16 " queue pairs not deleted",
				 adapter->nb_qps);
		return -EBUSY;
	}

	rte_eventdev_trace_crypto_adapter_free(id, adapter);
	if (adapter->default_cb_arg)
		rte_free(adapter->conf_arg);
	rte_free(adapter->cdevs);
	rte_free(adapter);
	event_crypto_adapter[id] = NULL;

	return 0;
}

static inline unsigned int
eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
		     struct rte_event *ev, unsigned int cnt)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	union rte_event_crypto_metadata *m_data = NULL;
	struct crypto_queue_pair_info *qp_info = NULL;
	struct rte_crypto_op *crypto_op;
	unsigned int i, n;
	uint16_t qp_id, len, ret;
	uint8_t cdev_id;

	len = 0;
	ret = 0;
	n = 0;
	stats->event_deq_count += cnt;

	for (i = 0; i < cnt; i++) {
		crypto_op = ev[i].event_ptr;
		if (crypto_op == NULL)
			continue;
		if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			m_data = rte_cryptodev_sym_session_get_user_data(
					crypto_op->sym->session);
			if (m_data == NULL) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
				continue;
			}

			cdev_id = m_data->request_info.cdev_id;
			qp_id = m_data->request_info.queue_pair_id;
			qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
			if (!qp_info->qp_enabled) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
				continue;
			}
			len = qp_info->len;
			qp_info->op_buffer[len] = crypto_op;
			len++;
		} else if (crypto_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
			   crypto_op->private_data_offset) {
			m_data = (union rte_event_crypto_metadata *)
				 ((uint8_t *)crypto_op +
				  crypto_op->private_data_offset);
			cdev_id = m_data->request_info.cdev_id;
			qp_id = m_data->request_info.queue_pair_id;
			qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
			if (!qp_info->qp_enabled) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
				continue;
			}
			len = qp_info->len;
			qp_info->op_buffer[len] = crypto_op;
			len++;
		} else {
			rte_pktmbuf_free(crypto_op->sym->m_src);
			rte_crypto_op_free(crypto_op);
			continue;
		}

		if (len == BATCH_SIZE) {
			struct rte_crypto_op **op_buffer = qp_info->op_buffer;
			ret = rte_cryptodev_enqueue_burst(cdev_id,
							  qp_id,
							  op_buffer,
							  BATCH_SIZE);

			stats->crypto_enq_count += ret;

			while (ret < len) {
				struct rte_crypto_op *op;
				op = op_buffer[ret++];
				stats->crypto_enq_fail++;
				rte_pktmbuf_free(op->sym->m_src);
				rte_crypto_op_free(op);
			}

			len = 0;
		}

		if (qp_info)
			qp_info->len = len;
		n += ret;
	}

	return n;
}
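
/*
 * Illustrative sketch of how an application provides the metadata read
 * above (not part of this library's build; cdev_id, qp_id, ev_queue_id and
 * sess are assumptions). For session-based ops the metadata is attached to
 * the session as user data, which eca_enq_to_cryptodev() then retrieves
 * with rte_cryptodev_sym_session_get_user_data().
 *
 *	union rte_event_crypto_metadata m_data;
 *
 *	memset(&m_data, 0, sizeof(m_data));
 *	m_data.request_info.cdev_id = cdev_id;
 *	m_data.request_info.queue_pair_id = qp_id;
 *	m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	m_data.response_info.queue_id = ev_queue_id;
 *	rte_cryptodev_sym_session_set_user_data(sess, &m_data,
 *						sizeof(m_data));
 *
 * For sessionless ops the same union is instead expected at
 * op->private_data_offset bytes past the start of the rte_crypto_op.
 */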

static unsigned int
eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct crypto_device_info *curr_dev;
	struct crypto_queue_pair_info *curr_queue;
	struct rte_crypto_op **op_buffer;
	struct rte_cryptodev *dev;
	uint8_t cdev_id;
	uint16_t qp;
	uint16_t ret;
	uint16_t num_cdev = rte_cryptodev_count();

	ret = 0;
	for (cdev_id = 0; cdev_id < num_cdev; cdev_id++) {
		curr_dev = &adapter->cdevs[cdev_id];
		dev = curr_dev->dev;
		if (dev == NULL)
			continue;
		for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {

			curr_queue = &curr_dev->qpairs[qp];
			if (!curr_queue->qp_enabled)
				continue;

			op_buffer = curr_queue->op_buffer;
			ret = rte_cryptodev_enqueue_burst(cdev_id,
							  qp,
							  op_buffer,
							  curr_queue->len);
			stats->crypto_enq_count += ret;

			while (ret < curr_queue->len) {
				struct rte_crypto_op *op;
				op = op_buffer[ret++];
				stats->crypto_enq_fail++;
				rte_pktmbuf_free(op->sym->m_src);
				rte_crypto_op_free(op);
			}
			curr_queue->len = 0;
		}
	}

	return ret;
}

static int
eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
			   unsigned int max_enq)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct rte_event ev[BATCH_SIZE];
	unsigned int nb_enq, nb_enqueued;
	uint16_t n;
	uint8_t event_dev_id = adapter->eventdev_id;
	uint8_t event_port_id = adapter->event_port_id;

	nb_enqueued = 0;
	if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
		return 0;

	for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
		stats->event_poll_count++;
		n = rte_event_dequeue_burst(event_dev_id,
					    event_port_id, ev, BATCH_SIZE, 0);

		if (!n)
			break;

		nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
	}

	if ((++adapter->transmit_loop_count &
	     (CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
		nb_enqueued += eca_crypto_enq_flush(adapter);
	}

	return nb_enqueued;
}

static inline void
eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
		      struct rte_crypto_op **ops, uint16_t num)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	union rte_event_crypto_metadata *m_data = NULL;
	uint8_t event_dev_id = adapter->eventdev_id;
	uint8_t event_port_id = adapter->event_port_id;
	struct rte_event events[BATCH_SIZE];
	uint16_t nb_enqueued, nb_ev;
	uint8_t retry;
	uint8_t i;

	nb_ev = 0;
	retry = 0;
	nb_enqueued = 0;
	num = RTE_MIN(num, BATCH_SIZE);
	for (i = 0; i < num; i++) {
		struct rte_event *ev = &events[nb_ev];

		m_data = NULL;
		if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			m_data = rte_cryptodev_sym_session_get_user_data(
					ops[i]->sym->session);
		} else if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
			   ops[i]->private_data_offset) {
			m_data = (union rte_event_crypto_metadata *)
				 ((uint8_t *)ops[i] +
				  ops[i]->private_data_offset);
		}

		if (unlikely(m_data == NULL)) {
			rte_pktmbuf_free(ops[i]->sym->m_src);
			rte_crypto_op_free(ops[i]);
			continue;
		}

		rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
		ev->event_ptr = ops[i];
		ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
		if (adapter->implicit_release_disabled)
			ev->op = RTE_EVENT_OP_FORWARD;
		else
			ev->op = RTE_EVENT_OP_NEW;
		nb_ev++;
	}

	do {
		nb_enqueued += rte_event_enqueue_burst(event_dev_id,
						       event_port_id,
						       &events[nb_enqueued],
						       nb_ev - nb_enqueued);
	} while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
		 nb_enqueued < nb_ev);

	/* Free mbufs and rte_crypto_ops for failed events */
	for (i = nb_enqueued; i < nb_ev; i++) {
		struct rte_crypto_op *op = events[i].event_ptr;
		rte_pktmbuf_free(op->sym->m_src);
		rte_crypto_op_free(op);
	}

	stats->event_enq_fail_count += nb_ev - nb_enqueued;
	stats->event_enq_count += nb_enqueued;
	stats->event_enq_retry_count += retry - 1;
}

static inline unsigned int
eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
			   unsigned int max_deq)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct crypto_device_info *curr_dev;
	struct crypto_queue_pair_info *curr_queue;
	struct rte_crypto_op *ops[BATCH_SIZE];
	uint16_t n, nb_deq;
	struct rte_cryptodev *dev;
	uint8_t cdev_id;
	uint16_t qp, dev_qps;
	bool done;
	uint16_t num_cdev = rte_cryptodev_count();

	nb_deq = 0;
	do {
		uint16_t queues = 0;
		done = true;

		for (cdev_id = adapter->next_cdev_id;
		     cdev_id < num_cdev; cdev_id++) {
			curr_dev = &adapter->cdevs[cdev_id];
			dev = curr_dev->dev;
			if (dev == NULL)
				continue;
			dev_qps = dev->data->nb_queue_pairs;

			for (qp = curr_dev->next_queue_pair_id;
			     queues < dev_qps; qp = (qp + 1) % dev_qps,
			     queues++) {

				curr_queue = &curr_dev->qpairs[qp];
				if (!curr_queue->qp_enabled)
					continue;

				n = rte_cryptodev_dequeue_burst(cdev_id, qp,
					ops, BATCH_SIZE);
				if (!n)
					continue;

				done = false;
				stats->crypto_deq_count += n;
				eca_ops_enqueue_burst(adapter, ops, n);
				nb_deq += n;

				if (nb_deq > max_deq) {
					if ((qp + 1) == dev_qps) {
						adapter->next_cdev_id =
							(cdev_id + 1)
							% num_cdev;
					}
					curr_dev->next_queue_pair_id = (qp + 1)
						% dev->data->nb_queue_pairs;

					return nb_deq;
				}
			}
		}
	} while (done == false);
	return nb_deq;
}

static void
eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
		       unsigned int max_ops)
{
	while (max_ops) {
		unsigned int e_cnt, d_cnt;

		e_cnt = eca_crypto_adapter_deq_run(adapter, max_ops);
		max_ops -= RTE_MIN(max_ops, e_cnt);

		d_cnt = eca_crypto_adapter_enq_run(adapter, max_ops);
		max_ops -= RTE_MIN(max_ops, d_cnt);

		if (e_cnt == 0 && d_cnt == 0)
			break;
	}
}

static int
eca_service_func(void *args)
{
	struct rte_event_crypto_adapter *adapter = args;

	if (rte_spinlock_trylock(&adapter->lock) == 0)
		return 0;
	eca_crypto_adapter_run(adapter, adapter->max_nb);
	rte_spinlock_unlock(&adapter->lock);

	return 0;
}

static int
eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
{
	struct rte_event_crypto_adapter_conf adapter_conf;
	struct rte_service_spec service;
	int ret;

	if (adapter->service_inited)
		return 0;

	memset(&service, 0, sizeof(service));
	snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
		 "rte_event_crypto_adapter_%d", id);
	service.socket_id = adapter->socket_id;
	service.callback = eca_service_func;
	service.callback_userdata = adapter;
	/* Service function handles locking for queue add/del updates */
	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
	ret = rte_service_component_register(&service, &adapter->service_id);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
				 service.name, ret);
		return ret;
	}

	ret = adapter->conf_cb(id, adapter->eventdev_id,
			       &adapter_conf, adapter->conf_arg);
	if (ret) {
		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
				 ret);
		return ret;
	}

	adapter->max_nb = adapter_conf.max_nb;
	adapter->event_port_id = adapter_conf.event_port_id;
	adapter->service_inited = 1;

	return ret;
}
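
/*
 * Illustrative sketch (not part of this library's build; ADAPTER_ID and
 * SERVICE_LCORE are assumptions): when the software service path is used,
 * the application is expected to map the adapter's service to a service
 * lcore so that eca_service_func() actually gets invoked.
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_crypto_adapter_service_id_get(ADAPTER_ID,
 *						     &service_id) == 0) {
 *		rte_service_lcore_add(SERVICE_LCORE);
 *		rte_service_map_lcore_set(service_id, SERVICE_LCORE, 1);
 *		rte_service_lcore_start(SERVICE_LCORE);
 *	}
 */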

static void
eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
		   struct crypto_device_info *dev_info,
		   int32_t queue_pair_id,
		   uint8_t add)
{
	struct crypto_queue_pair_info *qp_info;
	int enabled;
	uint16_t i;

	if (dev_info->qpairs == NULL)
		return;

	if (queue_pair_id == -1) {
		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
			eca_update_qp_info(adapter, dev_info, i, add);
	} else {
		qp_info = &dev_info->qpairs[queue_pair_id];
		enabled = qp_info->qp_enabled;
		if (add) {
			adapter->nb_qps += !enabled;
			dev_info->num_qpairs += !enabled;
		} else {
			adapter->nb_qps -= enabled;
			dev_info->num_qpairs -= enabled;
		}
		qp_info->qp_enabled = !!add;
	}
}

static int
eca_add_queue_pair(struct rte_event_crypto_adapter *adapter,
		   uint8_t cdev_id,
		   int queue_pair_id)
{
	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
	struct crypto_queue_pair_info *qpairs;
	uint32_t i;

	if (dev_info->qpairs == NULL) {
		dev_info->qpairs =
		    rte_zmalloc_socket(adapter->mem_name,
					dev_info->dev->data->nb_queue_pairs *
					sizeof(struct crypto_queue_pair_info),
					0, adapter->socket_id);
		if (dev_info->qpairs == NULL)
			return -ENOMEM;

		qpairs = dev_info->qpairs;
		qpairs->op_buffer = rte_zmalloc_socket(adapter->mem_name,
					BATCH_SIZE *
					sizeof(struct rte_crypto_op *),
					0, adapter->socket_id);
		if (!qpairs->op_buffer) {
			rte_free(qpairs);
			dev_info->qpairs = NULL;
			return -ENOMEM;
		}
	}

	if (queue_pair_id == -1) {
		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
			eca_update_qp_info(adapter, dev_info, i, 1);
	} else
		eca_update_qp_info(adapter, dev_info,
				   (uint16_t)queue_pair_id, 1);

	return 0;
}

int
rte_event_crypto_adapter_queue_pair_add(uint8_t id,
			uint8_t cdev_id,
			int32_t queue_pair_id,
			const struct rte_event *event)
{
	struct rte_event_crypto_adapter *adapter;
	struct rte_eventdev *dev;
	struct crypto_device_info *dev_info;
	uint32_t cap;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {
		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
		return -EINVAL;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
						cdev_id,
						&cap);
	if (ret) {
		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
				 " cdev %" PRIu8, id, cdev_id);
		return ret;
	}

	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
	    (event == NULL)) {
		RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
				 cdev_id);
		return -EINVAL;
	}

	dev_info = &adapter->cdevs[cdev_id];

	if (queue_pair_id != -1 &&
	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
				 (uint16_t)queue_pair_id);
		return -EINVAL;
	}

	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
	 * no need of service core as HW supports event forward capability.
	 */
	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
		RTE_FUNC_PTR_OR_ERR_RET(
			*dev->dev_ops->crypto_adapter_queue_pair_add,
			-ENOTSUP);
		if (dev_info->qpairs == NULL) {
			dev_info->qpairs =
			    rte_zmalloc_socket(adapter->mem_name,
					dev_info->dev->data->nb_queue_pairs *
					sizeof(struct crypto_queue_pair_info),
					0, adapter->socket_id);
			if (dev_info->qpairs == NULL)
				return -ENOMEM;
		}

		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
				dev_info->dev,
				queue_pair_id,
				event);
		if (ret)
			return ret;

		eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
				   queue_pair_id, 1);
	}

	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
	 * or this is a SW adapter, initialize the service so the application
	 * can choose whichever way it wants to use the adapter.
	 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
	 * The application may want to use one of the two modes below:
	 * a. OP_FORWARD mode -> HW dequeue + SW enqueue
	 * b. OP_NEW mode -> HW dequeue
	 * Case 2: No HW caps, use the SW adapter
	 * a. OP_FORWARD mode -> SW enqueue & dequeue
	 * b. OP_NEW mode -> SW dequeue
	 */
	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
	    (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
	     (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
		rte_spinlock_lock(&adapter->lock);
		ret = eca_init_service(adapter, id);
		if (ret == 0)
			ret = eca_add_queue_pair(adapter, cdev_id,
						 queue_pair_id);
		rte_spinlock_unlock(&adapter->lock);

		if (ret)
			return ret;

		rte_service_component_runstate_set(adapter->service_id, 1);
	}

	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id, event,
		queue_pair_id);
	return 0;
}
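
/*
 * Illustrative sketch of the usual software-adapter setup sequence (not part
 * of this library's build; ADAPTER_ID and CDEV_ID are assumptions): once the
 * cryptodev and eventdev are configured and started, add the crypto queue
 * pairs and start the adapter. Passing -1 adds all queue pairs; the event
 * argument may be NULL unless the device reports
 * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND.
 *
 *	int ret;
 *
 *	ret = rte_event_crypto_adapter_queue_pair_add(ADAPTER_ID, CDEV_ID,
 *						      -1, NULL);
 *	if (ret == 0)
 *		ret = rte_event_crypto_adapter_start(ADAPTER_ID);
 */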

int
rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
					int32_t queue_pair_id)
{
	struct rte_event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	int ret;
	uint32_t cap;
	uint16_t i;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {
		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
		return -EINVAL;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
						cdev_id,
						&cap);
	if (ret)
		return ret;

	dev_info = &adapter->cdevs[cdev_id];

	if (queue_pair_id != -1 &&
	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
				 (uint16_t)queue_pair_id);
		return -EINVAL;
	}

	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
		RTE_FUNC_PTR_OR_ERR_RET(
			*dev->dev_ops->crypto_adapter_queue_pair_del,
			-ENOTSUP);
		ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
						dev_info->dev,
						queue_pair_id);
		if (ret == 0) {
			eca_update_qp_info(adapter,
					   &adapter->cdevs[cdev_id],
					   queue_pair_id,
					   0);
			if (dev_info->num_qpairs == 0) {
				rte_free(dev_info->qpairs);
				dev_info->qpairs = NULL;
			}
		}
	} else {
		if (adapter->nb_qps == 0)
			return 0;

		rte_spinlock_lock(&adapter->lock);
		if (queue_pair_id == -1) {
			for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
			     i++)
				eca_update_qp_info(adapter, dev_info, i, 0);
		} else {
			eca_update_qp_info(adapter, dev_info,
					   (uint16_t)queue_pair_id, 0);
		}

		if (dev_info->num_qpairs == 0) {
			rte_free(dev_info->qpairs);
			dev_info->qpairs = NULL;
		}

		rte_spinlock_unlock(&adapter->lock);
		rte_service_component_runstate_set(adapter->service_id,
						   adapter->nb_qps);
	}

	rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id,
		queue_pair_id, ret);
	return ret;
}

static int
eca_adapter_ctrl(uint8_t id, int start)
{
	struct rte_event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	uint32_t i;
	int use_service;
	int stop = !start;

	use_service = 0;
	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];

	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		/* if start, check for num queue pairs */
		if (start && !dev_info->num_qpairs)
			continue;
		/* if stop, check if dev has been started */
		if (stop && !dev_info->dev_started)
			continue;
		use_service |= !dev_info->internal_event_port;
		dev_info->dev_started = start;
		if (dev_info->internal_event_port == 0)
			continue;
		start ? (*dev->dev_ops->crypto_adapter_start)(dev,
						dev_info->dev) :
			(*dev->dev_ops->crypto_adapter_stop)(dev,
						dev_info->dev);
	}

	if (use_service)
		rte_service_runstate_set(adapter->service_id, start);

	return 0;
}

int
rte_event_crypto_adapter_start(uint8_t id)
{
	struct rte_event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	rte_eventdev_trace_crypto_adapter_start(id, adapter);
	return eca_adapter_ctrl(id, 1);
}

int
rte_event_crypto_adapter_stop(uint8_t id)
{
	rte_eventdev_trace_crypto_adapter_stop(id);
	return eca_adapter_ctrl(id, 0);
}

int
rte_event_crypto_adapter_stats_get(uint8_t id,
				   struct rte_event_crypto_adapter_stats *stats)
{
	struct rte_event_crypto_adapter *adapter;
	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
	struct rte_event_crypto_adapter_stats dev_stats;
	struct rte_eventdev *dev;
	struct crypto_device_info *dev_info;
	uint32_t i;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || stats == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	memset(stats, 0, sizeof(*stats));
	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		if (dev_info->internal_event_port == 0 ||
		    dev->dev_ops->crypto_adapter_stats_get == NULL)
			continue;
		ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
						dev_info->dev,
						&dev_stats);
		if (ret)
			continue;

		dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
		dev_stats_sum.event_enq_count +=
			dev_stats.event_enq_count;
	}

	if (adapter->service_inited)
		*stats = adapter->crypto_stats;

	stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
	stats->event_enq_count += dev_stats_sum.event_enq_count;

	return 0;
}
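
/*
 * Illustrative sketch (not part of this library's build; ADAPTER_ID is an
 * assumption): an application can periodically poll the combined software
 * and PMD counters, for example to monitor enqueue failures.
 *
 *	struct rte_event_crypto_adapter_stats stats;
 *
 *	if (rte_event_crypto_adapter_stats_get(ADAPTER_ID, &stats) == 0)
 *		printf("deq %" PRIu64 " enq fail %" PRIu64 "\n",
 *		       stats.crypto_deq_count, stats.event_enq_fail_count);
 */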

int
rte_event_crypto_adapter_stats_reset(uint8_t id)
{
	struct rte_event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	uint32_t i;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		if (dev_info->internal_event_port == 0 ||
		    dev->dev_ops->crypto_adapter_stats_reset == NULL)
			continue;
		(*dev->dev_ops->crypto_adapter_stats_reset)(dev,
						dev_info->dev);
	}

	memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
	return 0;
}

int
rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
{
	struct rte_event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || service_id == NULL)
		return -EINVAL;

	if (adapter->service_inited)
		*service_id = adapter->service_id;

	return adapter->service_inited ? 0 : -ESRCH;
}

int
rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
{
	struct rte_event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || event_port_id == NULL)
		return -EINVAL;

	*event_port_id = adapter->event_port_id;

	return 0;
}
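
/*
 * Illustrative sketch (not part of this library's build; EVDEV_ID,
 * ADAPTER_ID and FWD_QUEUE_ID are assumptions): in
 * RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode with the software service path,
 * the application links a dedicated event queue to the adapter's event port
 * and enqueues events carrying crypto ops to that queue; the service
 * function then dequeues them in eca_crypto_adapter_enq_run() above.
 *
 *	uint8_t port_id;
 *	uint8_t queue_id = FWD_QUEUE_ID;
 *
 *	if (rte_event_crypto_adapter_event_port_get(ADAPTER_ID,
 *						    &port_id) == 0)
 *		rte_event_port_link(EVDEV_ID, port_id, &queue_id, NULL, 1);
 */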