/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 * All rights reserved.
 */

#include <string.h>
#include <stdbool.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_service_component.h>

#include "rte_eventdev.h"
#include "eventdev_pmd.h"
#include "eventdev_trace.h"
#include "rte_event_crypto_adapter.h"

#define BATCH_SIZE 32
#define DEFAULT_MAX_NB 128
#define CRYPTO_ADAPTER_NAME_LEN 32
#define CRYPTO_ADAPTER_MEM_NAME_LEN 32
#define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100

#define CRYPTO_ADAPTER_OPS_BUFFER_SZ (BATCH_SIZE + BATCH_SIZE)
#define CRYPTO_ADAPTER_BUFFER_SZ 1024

/* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
 * iterations of eca_crypto_adapter_enq_run()
 */
#define CRYPTO_ENQ_FLUSH_THRESHOLD 1024

struct crypto_ops_circular_buffer {
	/* index of head element in circular buffer */
	uint16_t head;
	/* index of tail element in circular buffer */
	uint16_t tail;
	/* number of elements in buffer */
	uint16_t count;
	/* size of circular buffer */
	uint16_t size;
	/* Pointer to hold rte_crypto_ops for batching */
	struct rte_crypto_op **op_buffer;
} __rte_cache_aligned;

struct event_crypto_adapter {
	/* Event device identifier */
	uint8_t eventdev_id;
	/* Event port identifier */
	uint8_t event_port_id;
	/* Store event device's implicit release capability */
	uint8_t implicit_release_disabled;
	/* Flag to indicate backpressure at cryptodev
	 * Stop further dequeuing events from eventdev
	 */
	bool stop_enq_to_cryptodev;
	/* Max crypto ops processed in any service function invocation */
	uint32_t max_nb;
	/* Lock to serialize config updates with service function */
	rte_spinlock_t lock;
	/* Next crypto device to be processed */
	uint16_t next_cdev_id;
	/* Per crypto device structure */
	struct crypto_device_info *cdevs;
	/* Loop counter to flush crypto ops */
	uint16_t transmit_loop_count;
	/* Circular buffer for batching crypto ops to eventdev */
	struct crypto_ops_circular_buffer ebuf;
	/* Per instance stats structure */
	struct rte_event_crypto_adapter_stats crypto_stats;
	/* Configuration callback for rte_service configuration */
	rte_event_crypto_adapter_conf_cb conf_cb;
	/* Configuration callback argument */
	void *conf_arg;
	/* Set if default_cb is being used */
	int default_cb_arg;
	/* Service initialization state */
	uint8_t service_inited;
	/* Memory allocation name */
	char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
	/* Socket identifier cached from eventdev */
	int socket_id;
	/* Per adapter EAL service */
	uint32_t service_id;
	/* No. of queue pairs configured */
	uint16_t nb_qps;
	/* Adapter mode */
	enum rte_event_crypto_adapter_mode mode;
} __rte_cache_aligned;

/* Per crypto device information */
struct crypto_device_info {
	/* Pointer to cryptodev */
	struct rte_cryptodev *dev;
	/* Pointer to queue pair info */
	struct crypto_queue_pair_info *qpairs;
	/* Next queue pair to be processed */
	uint16_t next_queue_pair_id;
	/* Set to indicate cryptodev->eventdev packet
	 * transfer uses a hardware mechanism
	 */
	uint8_t internal_event_port;
	/* Set to indicate processing has been started */
	uint8_t dev_started;
	/* If num_qpairs > 0, the start callback will
	 * be invoked if not already invoked
	 */
	uint16_t num_qpairs;
} __rte_cache_aligned;

/* Per queue pair information */
struct crypto_queue_pair_info {
	/* Set to indicate queue pair is enabled */
	bool qp_enabled;
	/* Circular buffer for batching crypto ops to cdev */
	struct crypto_ops_circular_buffer cbuf;
} __rte_cache_aligned;

static struct event_crypto_adapter **event_crypto_adapter;

/* Macros to check for valid adapter */
#define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
	if (!eca_valid_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d\n", id); \
		return retval; \
	} \
} while (0)

static inline int
eca_valid_id(uint8_t id)
{
	return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
}

static int
eca_init(void)
{
	const char *name = "crypto_adapter_array";
	const struct rte_memzone *mz;
	unsigned int sz;

	sz = sizeof(*event_crypto_adapter) *
	    RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);

	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
						 RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
					 PRId32, rte_errno);
			return -rte_errno;
		}
	}

	event_crypto_adapter = mz->addr;
	return 0;
}

static inline bool
eca_circular_buffer_batch_ready(struct crypto_ops_circular_buffer *bufp)
{
	return bufp->count >= BATCH_SIZE;
}

static inline bool
eca_circular_buffer_space_for_batch(struct crypto_ops_circular_buffer *bufp)
{
	return (bufp->size - bufp->count) >= BATCH_SIZE;
}

static inline void
eca_circular_buffer_free(struct crypto_ops_circular_buffer *bufp)
{
	rte_free(bufp->op_buffer);
}

static inline int
eca_circular_buffer_init(const char *name,
			 struct crypto_ops_circular_buffer *bufp,
			 uint16_t sz)
{
	bufp->op_buffer = rte_zmalloc(name,
				      sizeof(struct rte_crypto_op *) * sz,
				      0);
	if (bufp->op_buffer == NULL)
		return -ENOMEM;

	bufp->size = sz;
	return 0;
}

static inline int
eca_circular_buffer_add(struct crypto_ops_circular_buffer *bufp,
			struct rte_crypto_op *op)
{
	uint16_t *tailp = &bufp->tail;

	bufp->op_buffer[*tailp] = op;
	/* circular buffer, go round */
	*tailp = (*tailp + 1) % bufp->size;
	bufp->count++;

	return 0;
}

static inline int
eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp,
				  uint8_t cdev_id, uint16_t qp_id,
				  uint16_t *nb_ops_flushed)
{
	uint16_t n = 0;
	uint16_t *headp = &bufp->head;
	uint16_t *tailp = &bufp->tail;
	struct rte_crypto_op **ops = bufp->op_buffer;

	if (*tailp > *headp)
		n = *tailp - *headp;
	else if (*tailp < *headp)
		n = bufp->size - *headp;
	else {
		*nb_ops_flushed = 0;
		return 0; /* buffer empty */
	}

	*nb_ops_flushed = rte_cryptodev_enqueue_burst(cdev_id, qp_id,
						      &ops[*headp], n);
	bufp->count -= *nb_ops_flushed;
	if (!bufp->count) {
		*headp = 0;
		*tailp = 0;
	} else
		*headp = (*headp + *nb_ops_flushed) % bufp->size;

	return *nb_ops_flushed == n ? 0 : -1;
}
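
/*
 * Worked example of the wrap-around handling above (values are
 * hypothetical): with size = 8, head = 6, tail = 2 and count = 4, the
 * occupied slots are 6, 7, 0 and 1. A single call offers only the
 * contiguous segment starting at head, i.e. n = size - head = 2 ops
 * (slots 6 and 7); head then wraps to 0 and the remaining two ops are
 * flushed by a later call. A return of -1 therefore only means the
 * cryptodev accepted fewer than the n ops offered (back-pressure); a
 * wrapped buffer that still holds ops returns 0.
 */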

static inline struct event_crypto_adapter *
eca_id_to_adapter(uint8_t id)
{
	return event_crypto_adapter ?
		event_crypto_adapter[id] : NULL;
}

static int
eca_default_config_cb(uint8_t id, uint8_t dev_id,
		      struct rte_event_crypto_adapter_conf *conf, void *arg)
{
	struct rte_event_dev_config dev_conf;
	struct rte_eventdev *dev;
	uint8_t port_id;
	int started;
	int ret;
	struct rte_event_port_conf *port_conf = arg;
	struct event_crypto_adapter *adapter = eca_id_to_adapter(id);

	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);
	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports += 1;
	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", dev_id);
		if (started) {
			if (rte_event_dev_start(dev_id))
				return -EIO;
		}
		return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to setup event port %u\n", port_id);
		return ret;
	}

	conf->event_port_id = port_id;
	conf->max_nb = DEFAULT_MAX_NB;
	if (started)
		ret = rte_event_dev_start(dev_id);

	adapter->default_cb_arg = 1;
	return ret;
}
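
/*
 * Usage sketch of a caller-supplied configuration callback for
 * rte_event_crypto_adapter_create_ext(). Unlike the default callback
 * above, it hands back an event port the application configured itself,
 * so the event device is not stopped and reconfigured. Names such as
 * app_conf_cb, app_adapter_port_id and evdev_id are hypothetical.
 *
 *	static int
 *	app_conf_cb(uint8_t id, uint8_t evdev_id,
 *		    struct rte_event_crypto_adapter_conf *conf, void *arg)
 *	{
 *		uint8_t *port_id = arg;	// pre-configured event port
 *
 *		RTE_SET_USED(id);
 *		RTE_SET_USED(evdev_id);
 *		conf->event_port_id = *port_id;
 *		conf->max_nb = 128;
 *		return 0;
 *	}
 *
 *	uint8_t app_adapter_port_id = ...;
 *	rte_event_crypto_adapter_create_ext(id, evdev_id, app_conf_cb,
 *			RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD,
 *			&app_adapter_port_id);
 */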

int
rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
				    rte_event_crypto_adapter_conf_cb conf_cb,
				    enum rte_event_crypto_adapter_mode mode,
				    void *conf_arg)
{
	struct event_crypto_adapter *adapter;
	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
	struct rte_event_dev_info dev_info;
	int socket_id;
	uint8_t i;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	if (conf_cb == NULL)
		return -EINVAL;

	if (event_crypto_adapter == NULL) {
		ret = eca_init();
		if (ret)
			return ret;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter != NULL) {
		RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
		return -EEXIST;
	}

	socket_id = rte_event_dev_socket_id(dev_id);
	snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,
		 "rte_event_crypto_adapter_%d", id);

	adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
				     RTE_CACHE_LINE_SIZE, socket_id);
	if (adapter == NULL) {
		RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
		return -ENOMEM;
	}

	if (eca_circular_buffer_init("eca_edev_circular_buffer",
				     &adapter->ebuf,
				     CRYPTO_ADAPTER_BUFFER_SZ)) {
		RTE_EDEV_LOG_ERR("Failed to get memory for eventdev buffer");
		rte_free(adapter);
		return -ENOMEM;
	}

	ret = rte_event_dev_info_get(dev_id, &dev_info);
	if (ret < 0) {
		RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!",
				 dev_id, dev_info.driver_name);
		eca_circular_buffer_free(&adapter->ebuf);
		rte_free(adapter);
		return ret;
	}

	adapter->implicit_release_disabled = (dev_info.event_dev_cap &
			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
	adapter->eventdev_id = dev_id;
	adapter->socket_id = socket_id;
	adapter->conf_cb = conf_cb;
	adapter->conf_arg = conf_arg;
	adapter->mode = mode;
	strcpy(adapter->mem_name, mem_name);
	adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
					    rte_cryptodev_count() *
					    sizeof(struct crypto_device_info),
					    0, socket_id);
	if (adapter->cdevs == NULL) {
		RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices\n");
		eca_circular_buffer_free(&adapter->ebuf);
		rte_free(adapter);
		return -ENOMEM;
	}

	rte_spinlock_init(&adapter->lock);
	for (i = 0; i < rte_cryptodev_count(); i++)
		adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);

	event_crypto_adapter[id] = adapter;

	rte_eventdev_trace_crypto_adapter_create(id, dev_id, adapter, conf_arg,
		mode);
	return 0;
}

int
rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
				struct rte_event_port_conf *port_config,
				enum rte_event_crypto_adapter_mode mode)
{
	struct rte_event_port_conf *pc;
	int ret;

	if (port_config == NULL)
		return -EINVAL;
	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	pc = rte_malloc(NULL, sizeof(*pc), 0);
	if (pc == NULL)
		return -ENOMEM;
	*pc = *port_config;
	ret = rte_event_crypto_adapter_create_ext(id, dev_id,
						  eca_default_config_cb,
						  mode,
						  pc);
	if (ret)
		rte_free(pc);

	return ret;
}
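
/*
 * Usage sketch (hypothetical names such as ADAPTER_ID and evdev_id;
 * error handling trimmed): create an adapter in FORWARD mode, reusing
 * the default event port configuration reported by the event device.
 *
 *	struct rte_event_port_conf def_port_conf;
 *	int rc;
 *
 *	rc = rte_event_port_default_conf_get(evdev_id, 0, &def_port_conf);
 *	if (rc == 0)
 *		rc = rte_event_crypto_adapter_create(ADAPTER_ID, evdev_id,
 *				&def_port_conf,
 *				RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
 */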

int
rte_event_crypto_adapter_free(uint8_t id)
{
	struct event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	if (adapter->nb_qps) {
		RTE_EDEV_LOG_ERR("%" PRIu16 " queue pairs not deleted",
				 adapter->nb_qps);
		return -EBUSY;
	}

	rte_eventdev_trace_crypto_adapter_free(id, adapter);
	if (adapter->default_cb_arg)
		rte_free(adapter->conf_arg);
	rte_free(adapter->cdevs);
	rte_free(adapter);
	event_crypto_adapter[id] = NULL;

	return 0;
}

static inline unsigned int
eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
		     unsigned int cnt)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	union rte_event_crypto_metadata *m_data = NULL;
	struct crypto_queue_pair_info *qp_info = NULL;
	struct rte_crypto_op *crypto_op;
	unsigned int i, n;
	uint16_t qp_id, nb_enqueued = 0;
	uint8_t cdev_id;
	int ret;

	ret = 0;
	n = 0;
	stats->event_deq_count += cnt;

	for (i = 0; i < cnt; i++) {
		crypto_op = ev[i].event_ptr;
		if (crypto_op == NULL)
			continue;
		if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			m_data = rte_cryptodev_sym_session_get_user_data(
					crypto_op->sym->session);
			if (m_data == NULL) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
				continue;
			}

			cdev_id = m_data->request_info.cdev_id;
			qp_id = m_data->request_info.queue_pair_id;
			qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
			if (!qp_info->qp_enabled) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
				continue;
			}
			eca_circular_buffer_add(&qp_info->cbuf, crypto_op);
		} else if (crypto_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
			   crypto_op->private_data_offset) {
			m_data = (union rte_event_crypto_metadata *)
				 ((uint8_t *)crypto_op +
				  crypto_op->private_data_offset);
			cdev_id = m_data->request_info.cdev_id;
			qp_id = m_data->request_info.queue_pair_id;
			qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
			if (!qp_info->qp_enabled) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
				continue;
			}
			eca_circular_buffer_add(&qp_info->cbuf, crypto_op);
		} else {
			rte_pktmbuf_free(crypto_op->sym->m_src);
			rte_crypto_op_free(crypto_op);
			continue;
		}

		if (eca_circular_buffer_batch_ready(&qp_info->cbuf)) {
			ret = eca_circular_buffer_flush_to_cdev(&qp_info->cbuf,
								cdev_id,
								qp_id,
								&nb_enqueued);
			/* If some crypto ops failed to flush to cdev and
			 * space for another batch is not available, stop
			 * dequeue from eventdev momentarily
			 */
			if (unlikely(ret < 0 &&
			    !eca_circular_buffer_space_for_batch(
							&qp_info->cbuf)))
				adapter->stop_enq_to_cryptodev = true;
		}

		stats->crypto_enq_count += nb_enqueued;
		n += nb_enqueued;
	}

	return n;
}
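
/*
 * The metadata lookups above rely on the application attaching
 * request/response information to each op before enqueueing it to the
 * adapter. A minimal sketch for the session case (names such as sess,
 * cdev_id, qp_id and app_crypto_done_queue are hypothetical; error
 * handling omitted):
 *
 *	union rte_event_crypto_metadata m_data;
 *
 *	memset(&m_data, 0, sizeof(m_data));
 *	m_data.request_info.cdev_id = cdev_id;
 *	m_data.request_info.queue_pair_id = qp_id;
 *	m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	m_data.response_info.queue_id = app_crypto_done_queue;
 *	rte_cryptodev_sym_session_set_user_data(sess, &m_data,
 *						sizeof(m_data));
 *
 * For sessionless ops the same union is instead copied into the op's
 * private data area at crypto_op->private_data_offset.
 */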

static unsigned int
eca_crypto_cdev_flush(struct event_crypto_adapter *adapter,
		      uint8_t cdev_id, uint16_t *nb_ops_flushed)
{
	struct crypto_device_info *curr_dev;
	struct crypto_queue_pair_info *curr_queue;
	struct rte_cryptodev *dev;
	uint16_t nb = 0, nb_enqueued = 0;
	uint16_t qp;

	curr_dev = &adapter->cdevs[cdev_id];
	dev = rte_cryptodev_pmd_get_dev(cdev_id);

	for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {

		curr_queue = &curr_dev->qpairs[qp];
		if (unlikely(curr_queue == NULL || !curr_queue->qp_enabled))
			continue;

		eca_circular_buffer_flush_to_cdev(&curr_queue->cbuf,
						  cdev_id,
						  qp,
						  &nb_enqueued);
		*nb_ops_flushed += curr_queue->cbuf.count;
		nb += nb_enqueued;
	}

	return nb;
}

static unsigned int
eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	uint8_t cdev_id;
	uint16_t nb_enqueued = 0;
	uint16_t nb_ops_flushed = 0;
	uint16_t num_cdev = rte_cryptodev_count();

	for (cdev_id = 0; cdev_id < num_cdev; cdev_id++)
		nb_enqueued += eca_crypto_cdev_flush(adapter,
						     cdev_id,
						     &nb_ops_flushed);
	/* Enable dequeue from eventdev if all ops from circular
	 * buffer flushed to cdev
	 */
	if (!nb_ops_flushed)
		adapter->stop_enq_to_cryptodev = false;

	stats->crypto_enq_count += nb_enqueued;

	return nb_enqueued;
}

static int
eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
			   unsigned int max_enq)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct rte_event ev[BATCH_SIZE];
	unsigned int nb_enq, nb_enqueued;
	uint16_t n;
	uint8_t event_dev_id = adapter->eventdev_id;
	uint8_t event_port_id = adapter->event_port_id;

	nb_enqueued = 0;
	if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
		return 0;

	if (unlikely(adapter->stop_enq_to_cryptodev)) {
		nb_enqueued += eca_crypto_enq_flush(adapter);

		if (unlikely(adapter->stop_enq_to_cryptodev))
			goto skip_event_dequeue_burst;
	}

	for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
		stats->event_poll_count++;
		n = rte_event_dequeue_burst(event_dev_id,
					    event_port_id, ev, BATCH_SIZE, 0);

		if (!n)
			break;

		nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
	}

skip_event_dequeue_burst:

	if ((++adapter->transmit_loop_count &
	    (CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
		nb_enqueued += eca_crypto_enq_flush(adapter);
	}

	return nb_enqueued;
}

static inline uint16_t
eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
		      struct rte_crypto_op **ops, uint16_t num)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	union rte_event_crypto_metadata *m_data = NULL;
	uint8_t event_dev_id = adapter->eventdev_id;
	uint8_t event_port_id = adapter->event_port_id;
	struct rte_event events[BATCH_SIZE];
	uint16_t nb_enqueued, nb_ev;
	uint8_t retry;
	uint8_t i;

	nb_ev = 0;
	retry = 0;
	nb_enqueued = 0;
	num = RTE_MIN(num, BATCH_SIZE);
	for (i = 0; i < num; i++) {
		struct rte_event *ev = &events[nb_ev++];

		m_data = NULL;
		if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			m_data = rte_cryptodev_sym_session_get_user_data(
					ops[i]->sym->session);
		} else if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
			   ops[i]->private_data_offset) {
			m_data = (union rte_event_crypto_metadata *)
				 ((uint8_t *)ops[i] +
				  ops[i]->private_data_offset);
		}

		if (unlikely(m_data == NULL)) {
			rte_pktmbuf_free(ops[i]->sym->m_src);
			rte_crypto_op_free(ops[i]);
			continue;
		}

		rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
		ev->event_ptr = ops[i];
		ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
		if (adapter->implicit_release_disabled)
			ev->op = RTE_EVENT_OP_FORWARD;
		else
			ev->op = RTE_EVENT_OP_NEW;
	}

	do {
		nb_enqueued += rte_event_enqueue_burst(event_dev_id,
						       event_port_id,
						       &events[nb_enqueued],
						       nb_ev - nb_enqueued);

	} while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
		 nb_enqueued < nb_ev);

	stats->event_enq_fail_count += nb_ev - nb_enqueued;
	stats->event_enq_count += nb_enqueued;
	stats->event_enq_retry_count += retry - 1;

	return nb_enqueued;
}

static int
eca_circular_buffer_flush_to_evdev(struct event_crypto_adapter *adapter,
				   struct crypto_ops_circular_buffer *bufp)
{
	uint16_t n = 0, nb_ops_flushed;
	uint16_t *headp = &bufp->head;
	uint16_t *tailp = &bufp->tail;
	struct rte_crypto_op **ops = bufp->op_buffer;

	if (*tailp > *headp)
		n = *tailp - *headp;
	else if (*tailp < *headp)
		n = bufp->size - *headp;
	else
		return 0; /* buffer empty */

	/* flush from the current head, not from the start of the buffer */
	nb_ops_flushed = eca_ops_enqueue_burst(adapter, &ops[*headp], n);
	bufp->count -= nb_ops_flushed;
	if (!bufp->count) {
		*headp = 0;
		*tailp = 0;
		return 0; /* buffer empty */
	}

	*headp = (*headp + nb_ops_flushed) % bufp->size;
	return 1;
}

static void
eca_ops_buffer_flush(struct event_crypto_adapter *adapter)
{
	if (likely(adapter->ebuf.count == 0))
		return;

	while (eca_circular_buffer_flush_to_evdev(adapter,
						  &adapter->ebuf))
		;
}

static inline unsigned int
eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
			   unsigned int max_deq)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct crypto_device_info *curr_dev;
	struct crypto_queue_pair_info *curr_queue;
	struct rte_crypto_op *ops[BATCH_SIZE];
	uint16_t n, nb_deq, nb_enqueued, i;
	struct rte_cryptodev *dev;
	uint8_t cdev_id;
	uint16_t qp, dev_qps;
	bool done;
	uint16_t num_cdev = rte_cryptodev_count();

	nb_deq = 0;
	eca_ops_buffer_flush(adapter);

	do {
		done = true;

		for (cdev_id = adapter->next_cdev_id;
		     cdev_id < num_cdev; cdev_id++) {
			uint16_t queues = 0;

			curr_dev = &adapter->cdevs[cdev_id];
			dev = curr_dev->dev;
			if (unlikely(dev == NULL))
				continue;

			dev_qps = dev->data->nb_queue_pairs;

			for (qp = curr_dev->next_queue_pair_id;
			     queues < dev_qps; qp = (qp + 1) % dev_qps,
			     queues++) {

				curr_queue = &curr_dev->qpairs[qp];
				if (unlikely(curr_queue == NULL ||
				    !curr_queue->qp_enabled))
					continue;

				n = rte_cryptodev_dequeue_burst(cdev_id, qp,
					ops, BATCH_SIZE);
				if (!n)
					continue;

				done = false;
				nb_enqueued = 0;

				stats->crypto_deq_count += n;

				if (unlikely(!adapter->ebuf.count))
					nb_enqueued = eca_ops_enqueue_burst(
							adapter, ops, n);

				if (likely(nb_enqueued == n))
					goto check;

				/* Failed to enqueue events case */
				for (i = nb_enqueued; i < n; i++)
					eca_circular_buffer_add(
							&adapter->ebuf,
							ops[i]);

check:
				nb_deq += n;

				if (nb_deq >= max_deq) {
					if ((qp + 1) == dev_qps) {
						adapter->next_cdev_id =
							(cdev_id + 1)
							% num_cdev;
					}
					curr_dev->next_queue_pair_id = (qp + 1)
						% dev->data->nb_queue_pairs;

					return nb_deq;
				}
			}
		}
		adapter->next_cdev_id = 0;
	} while (done == false);
	return nb_deq;
}

static void
eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
		       unsigned int max_ops)
{
	unsigned int ops_left = max_ops;

	while (ops_left > 0) {
		unsigned int e_cnt, d_cnt;

		e_cnt = eca_crypto_adapter_deq_run(adapter, ops_left);
		ops_left -= RTE_MIN(ops_left, e_cnt);

		d_cnt = eca_crypto_adapter_enq_run(adapter, ops_left);
		ops_left -= RTE_MIN(ops_left, d_cnt);

		if (e_cnt == 0 && d_cnt == 0)
			break;

	}

	if (ops_left == max_ops)
		rte_event_maintain(adapter->eventdev_id,
				   adapter->event_port_id, 0);
}

static int
eca_service_func(void *args)
{
	struct event_crypto_adapter *adapter = args;

	if (rte_spinlock_trylock(&adapter->lock) == 0)
		return 0;
	eca_crypto_adapter_run(adapter, adapter->max_nb);
	rte_spinlock_unlock(&adapter->lock);

	return 0;
}

static int
eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
{
	struct rte_event_crypto_adapter_conf adapter_conf;
	struct rte_service_spec service;
	int ret;

	if (adapter->service_inited)
		return 0;

	memset(&service, 0, sizeof(service));
	snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
		 "rte_event_crypto_adapter_%d", id);
	service.socket_id = adapter->socket_id;
	service.callback = eca_service_func;
	service.callback_userdata = adapter;
	/* Service function handles locking for queue add/del updates */
	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
	ret = rte_service_component_register(&service, &adapter->service_id);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
				 service.name, ret);
		return ret;
	}

	ret = adapter->conf_cb(id, adapter->eventdev_id,
			       &adapter_conf, adapter->conf_arg);
	if (ret) {
		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
				 ret);
		return ret;
	}

	adapter->max_nb = adapter_conf.max_nb;
	adapter->event_port_id = adapter_conf.event_port_id;
	adapter->service_inited = 1;

	return ret;
}

static void
eca_update_qp_info(struct event_crypto_adapter *adapter,
		   struct crypto_device_info *dev_info, int32_t queue_pair_id,
		   uint8_t add)
{
	struct crypto_queue_pair_info *qp_info;
	int enabled;
	uint16_t i;

	if (dev_info->qpairs == NULL)
		return;

	if (queue_pair_id == -1) {
		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
			eca_update_qp_info(adapter, dev_info, i, add);
	} else {
		qp_info = &dev_info->qpairs[queue_pair_id];
		enabled = qp_info->qp_enabled;
		if (add) {
			adapter->nb_qps += !enabled;
			dev_info->num_qpairs += !enabled;
		} else {
			adapter->nb_qps -= enabled;
			dev_info->num_qpairs -= enabled;
		}
		qp_info->qp_enabled = !!add;
	}
}

static int
eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
		   int queue_pair_id)
{
	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
	struct crypto_queue_pair_info *qpairs;
	uint32_t i;

	if (dev_info->qpairs == NULL) {
		dev_info->qpairs =
		    rte_zmalloc_socket(adapter->mem_name,
				       dev_info->dev->data->nb_queue_pairs *
				       sizeof(struct crypto_queue_pair_info),
				       0, adapter->socket_id);
		if (dev_info->qpairs == NULL)
			return -ENOMEM;

		qpairs = dev_info->qpairs;

		if (eca_circular_buffer_init("eca_cdev_circular_buffer",
					     &qpairs->cbuf,
					     CRYPTO_ADAPTER_OPS_BUFFER_SZ)) {
			RTE_EDEV_LOG_ERR("Failed to get memory for cryptodev "
					 "buffer");
			rte_free(qpairs);
			return -ENOMEM;
		}
	}

	if (queue_pair_id == -1) {
		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
			eca_update_qp_info(adapter, dev_info, i, 1);
	} else
		eca_update_qp_info(adapter, dev_info,
				   (uint16_t)queue_pair_id, 1);

	return 0;
}

int
rte_event_crypto_adapter_queue_pair_add(uint8_t id,
					uint8_t cdev_id,
					int32_t queue_pair_id,
					const struct rte_event *event)
{
	struct event_crypto_adapter *adapter;
	struct rte_eventdev *dev;
	struct crypto_device_info *dev_info;
	uint32_t cap;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
		return -EINVAL;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
						cdev_id,
						&cap);
	if (ret) {
		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
				 " cdev %" PRIu8, id, cdev_id);
		return ret;
	}

	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
	    (event == NULL)) {
		RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
				 cdev_id);
		return -EINVAL;
	}

	dev_info = &adapter->cdevs[cdev_id];

	if (queue_pair_id != -1 &&
	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
				 (uint16_t)queue_pair_id);
		return -EINVAL;
	}

	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
	 * no need of service core as HW supports event forward capability.
	 */
	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
		RTE_FUNC_PTR_OR_ERR_RET(
			*dev->dev_ops->crypto_adapter_queue_pair_add,
			-ENOTSUP);
		if (dev_info->qpairs == NULL) {
			dev_info->qpairs =
			    rte_zmalloc_socket(adapter->mem_name,
					dev_info->dev->data->nb_queue_pairs *
					sizeof(struct crypto_queue_pair_info),
					0, adapter->socket_id);
			if (dev_info->qpairs == NULL)
				return -ENOMEM;
		}

		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
				dev_info->dev,
				queue_pair_id,
				event);
		if (ret)
			return ret;
		else
			eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
					   queue_pair_id, 1);
	}

	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
	 * or SW adapter, initiate services so the application can choose
	 * whichever way it wants to use the adapter.
	 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
	 *         Application may want to use one of the below two modes
	 *         a. OP_FORWARD mode -> HW dequeue + SW enqueue
	 *         b. OP_NEW mode -> HW dequeue
	 * Case 2: No HW caps, use SW adapter
	 *         a. OP_FORWARD mode -> SW enqueue & dequeue
	 *         b. OP_NEW mode -> SW dequeue
	 */
	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
	    (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
	     (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
		rte_spinlock_lock(&adapter->lock);
		ret = eca_init_service(adapter, id);
		if (ret == 0)
			ret = eca_add_queue_pair(adapter, cdev_id,
						 queue_pair_id);
		rte_spinlock_unlock(&adapter->lock);

		if (ret)
			return ret;

		rte_service_component_runstate_set(adapter->service_id, 1);
	}

	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id, event,
		queue_pair_id);
	return 0;
}
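
/*
 * Usage sketch (hypothetical identifiers, error handling trimmed): bind
 * cryptodev queue pair 0 to the adapter. The response event is only
 * required when the PMD reports the INTERNAL_PORT_QP_EV_BIND capability.
 *
 *	uint32_t caps;
 *	struct rte_event resp_ev = {
 *		.queue_id = app_crypto_done_queue,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	};
 *
 *	rte_event_crypto_adapter_caps_get(evdev_id, cdev_id, &caps);
 *	if (caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND)
 *		rte_event_crypto_adapter_queue_pair_add(ADAPTER_ID, cdev_id,
 *							0, &resp_ev);
 *	else
 *		rte_event_crypto_adapter_queue_pair_add(ADAPTER_ID, cdev_id,
 *							0, NULL);
 *	rte_event_crypto_adapter_start(ADAPTER_ID);
 */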

int
rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
					int32_t queue_pair_id)
{
	struct event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	int ret;
	uint32_t cap;
	uint16_t i;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
		return -EINVAL;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
						cdev_id,
						&cap);
	if (ret)
		return ret;

	dev_info = &adapter->cdevs[cdev_id];

	if (queue_pair_id != -1 &&
	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
				 (uint16_t)queue_pair_id);
		return -EINVAL;
	}

	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
		RTE_FUNC_PTR_OR_ERR_RET(
			*dev->dev_ops->crypto_adapter_queue_pair_del,
			-ENOTSUP);
		ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
						dev_info->dev,
						queue_pair_id);
		if (ret == 0) {
			eca_update_qp_info(adapter,
					&adapter->cdevs[cdev_id],
					queue_pair_id,
					0);
			if (dev_info->num_qpairs == 0) {
				rte_free(dev_info->qpairs);
				dev_info->qpairs = NULL;
			}
		}
	} else {
		if (adapter->nb_qps == 0)
			return 0;

		rte_spinlock_lock(&adapter->lock);
		if (queue_pair_id == -1) {
			for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
			     i++)
				eca_update_qp_info(adapter, dev_info, i, 0);
		} else {
			eca_update_qp_info(adapter, dev_info,
					   (uint16_t)queue_pair_id, 0);
		}

		if (dev_info->num_qpairs == 0) {
			rte_free(dev_info->qpairs);
			dev_info->qpairs = NULL;
		}

		rte_spinlock_unlock(&adapter->lock);
		rte_service_component_runstate_set(adapter->service_id,
						   adapter->nb_qps);
	}

	rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id,
		queue_pair_id, ret);
	return ret;
}

static int
eca_adapter_ctrl(uint8_t id, int start)
{
	struct event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	uint32_t i;
	int use_service;
	int stop = !start;

	use_service = 0;
	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];

	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		/* if start check for num queue pairs */
		if (start && !dev_info->num_qpairs)
			continue;
		/* if stop check if dev has been started */
		if (stop && !dev_info->dev_started)
			continue;
		use_service |= !dev_info->internal_event_port;
		dev_info->dev_started = start;
		if (dev_info->internal_event_port == 0)
			continue;
		start ? (*dev->dev_ops->crypto_adapter_start)(dev,
				dev_info->dev) :
			(*dev->dev_ops->crypto_adapter_stop)(dev,
				dev_info->dev);
	}

	if (use_service)
		rte_service_runstate_set(adapter->service_id, start);

	return 0;
}

int
rte_event_crypto_adapter_start(uint8_t id)
{
	struct event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	rte_eventdev_trace_crypto_adapter_start(id, adapter);
	return eca_adapter_ctrl(id, 1);
}

int
rte_event_crypto_adapter_stop(uint8_t id)
{
	rte_eventdev_trace_crypto_adapter_stop(id);
	return eca_adapter_ctrl(id, 0);
}

int
rte_event_crypto_adapter_stats_get(uint8_t id,
				   struct rte_event_crypto_adapter_stats *stats)
{
	struct event_crypto_adapter *adapter;
	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
	struct rte_event_crypto_adapter_stats dev_stats;
	struct rte_eventdev *dev;
	struct crypto_device_info *dev_info;
	uint32_t i;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || stats == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	memset(stats, 0, sizeof(*stats));
	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		if (dev_info->internal_event_port == 0 ||
		    dev->dev_ops->crypto_adapter_stats_get == NULL)
			continue;
		ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
						dev_info->dev,
						&dev_stats);
		if (ret)
			continue;

		dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
		dev_stats_sum.event_enq_count +=
			dev_stats.event_enq_count;
	}

	if (adapter->service_inited)
		*stats = adapter->crypto_stats;

	stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
	stats->event_enq_count += dev_stats_sum.event_enq_count;

	return 0;
}
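
/*
 * Usage sketch (hypothetical ADAPTER_ID; return values unchecked): dump
 * the combined SW and PMD counters for an adapter, e.g. from an
 * application stats thread, then clear them.
 *
 *	struct rte_event_crypto_adapter_stats stats;
 *
 *	rte_event_crypto_adapter_stats_get(ADAPTER_ID, &stats);
 *	printf("deq from cdev: %" PRIu64 ", enq to evdev: %" PRIu64 "\n",
 *	       stats.crypto_deq_count, stats.event_enq_count);
 *	rte_event_crypto_adapter_stats_reset(ADAPTER_ID);
 */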

int
rte_event_crypto_adapter_stats_reset(uint8_t id)
{
	struct event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	uint32_t i;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		if (dev_info->internal_event_port == 0 ||
		    dev->dev_ops->crypto_adapter_stats_reset == NULL)
			continue;
		(*dev->dev_ops->crypto_adapter_stats_reset)(dev,
						dev_info->dev);
	}

	memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
	return 0;
}

int
rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
{
	struct event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || service_id == NULL)
		return -EINVAL;

	if (adapter->service_inited)
		*service_id = adapter->service_id;

	return adapter->service_inited ? 0 : -ESRCH;
}
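
/*
 * Usage sketch (hypothetical service_lcore and ADAPTER_ID; error
 * handling trimmed): when the adapter runs in SW mode its service must
 * be mapped to a service lcore before rte_event_crypto_adapter_start()
 * does any useful work.
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_crypto_adapter_service_id_get(ADAPTER_ID,
 *						    &service_id) == 0) {
 *		rte_service_lcore_add(service_lcore);
 *		rte_service_map_lcore_set(service_id, service_lcore, 1);
 *		rte_service_lcore_start(service_lcore);
 *	}
 */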

int
rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
{
	struct event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || event_port_id == NULL)
		return -EINVAL;

	*event_port_id = adapter->event_port_id;

	return 0;
}