/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#include <string.h>
#include <stdarg.h>
#include <stdio.h>
#include <inttypes.h>

#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_memzone.h>

#include "rte_compressdev.h"
#include "rte_compressdev_internal.h"
#include "rte_compressdev_pmd.h"

#define RTE_COMPRESSDEV_DETACHED  (0)
#define RTE_COMPRESSDEV_ATTACHED  (1)

static struct rte_compressdev rte_comp_devices[RTE_COMPRESS_MAX_DEVS];

static struct rte_compressdev_global compressdev_globals = {
	.devs		= rte_comp_devices,
	.data		= { NULL },
	.nb_devs	= 0,
	.max_devs	= RTE_COMPRESS_MAX_DEVS
};

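/*
 * Return the first capability entry of the given device that matches the
 * requested algorithm, or NULL if the device id is invalid or the algorithm
 * is not present in the device's capability array.
 */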
const struct rte_compressdev_capabilities *
rte_compressdev_capability_get(uint8_t dev_id,
		enum rte_comp_algorithm algo)
{
	const struct rte_compressdev_capabilities *capability;
	struct rte_compressdev_info dev_info;
	int i = 0;

	if (dev_id >= compressdev_globals.nb_devs) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
		return NULL;
	}
	rte_compressdev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->algo !=
			RTE_COMP_ALGO_UNSPECIFIED) {
		if (capability->algo == algo)
			return capability;
	}

	return NULL;
}

const char *
rte_compressdev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_COMPDEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_COMPDEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_COMPDEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_COMPDEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_COMPDEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_COMPDEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_COMPDEV_FF_OP_DONE_IN_DEQUEUE:
		return "OP_DONE_IN_DEQ";
	default:
		return NULL;
	}
}

static struct rte_compressdev *
rte_compressdev_get_dev(uint8_t dev_id)
{
	return &compressdev_globals.devs[dev_id];
}

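/*
 * Look up an attached device by name; returns NULL if no attached device
 * with that name exists.
 */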
struct rte_compressdev *
rte_compressdev_pmd_get_named_dev(const char *name)
{
	struct rte_compressdev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < compressdev_globals.max_devs; i++) {
		dev = &compressdev_globals.devs[i];

		if ((dev->attached == RTE_COMPRESSDEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

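/*
 * A device id is valid only if it is below the number of allocated devices
 * and the corresponding device is currently attached.
 */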
static unsigned int
rte_compressdev_is_valid_dev(uint8_t dev_id)
{
	struct rte_compressdev *dev = NULL;

	if (dev_id >= compressdev_globals.nb_devs)
		return 0;

	dev = rte_compressdev_get_dev(dev_id);
	if (dev->attached != RTE_COMPRESSDEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_compressdev_get_dev_id(const char *name)
{
	unsigned int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < compressdev_globals.nb_devs; i++)
		if ((strcmp(compressdev_globals.devs[i].data->name, name)
				== 0) &&
				(compressdev_globals.devs[i].attached ==
						RTE_COMPRESSDEV_ATTACHED))
			return i;

	return -1;
}

uint8_t
rte_compressdev_count(void)
{
	return compressdev_globals.nb_devs;
}

uint8_t
rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
		uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_compressdev *devs = compressdev_globals.devs;
	uint8_t max_devs = compressdev_globals.max_devs;

	for (i = 0; i < max_devs && count < nb_devices; i++) {

		if (devs[i].attached == RTE_COMPRESSDEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name));

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}

int
rte_compressdev_socket_id(uint8_t dev_id)
{
	struct rte_compressdev *dev;

	if (!rte_compressdev_is_valid_dev(dev_id))
		return -1;

	dev = rte_compressdev_get_dev(dev_id);

	return dev->data->socket_id;
}

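/*
 * Allocate (primary process) or look up (secondary process) the shared
 * memzone backing the device data; only the primary process zeroes it.
 */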
static inline int
rte_compressdev_data_alloc(uint8_t dev_id, struct rte_compressdev_data **data,
		int socket_id)
{
	char mz_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name),
			"rte_compressdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_compressdev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_compressdev_data));

	return 0;
}

static uint8_t
rte_compressdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_COMPRESS_MAX_DEVS; dev_id++) {
		if (rte_comp_devices[dev_id].attached ==
				RTE_COMPRESSDEV_DETACHED)
			return dev_id;
	}
	return RTE_COMPRESS_MAX_DEVS;
}

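/*
 * Reserve a free device slot for a new PMD instance: allocate the shared
 * device data, record the name, socket and dev_id, and mark the slot as
 * attached.
 */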
struct rte_compressdev *
rte_compressdev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_compressdev *compressdev;
	uint8_t dev_id;

	if (rte_compressdev_pmd_get_named_dev(name) != NULL) {
		COMPRESSDEV_LOG(ERR,
			"comp device with name %s already allocated!", name);
		return NULL;
	}

	dev_id = rte_compressdev_find_free_device_index();
	if (dev_id == RTE_COMPRESS_MAX_DEVS) {
		COMPRESSDEV_LOG(ERR, "Reached maximum number of comp devices");
		return NULL;
	}
	compressdev = rte_compressdev_get_dev(dev_id);

	if (compressdev->data == NULL) {
		struct rte_compressdev_data *compressdev_data =
				compressdev_globals.data[dev_id];

		int retval = rte_compressdev_data_alloc(dev_id,
				&compressdev_data, socket_id);

		if (retval < 0 || compressdev_data == NULL)
			return NULL;

		compressdev->data = compressdev_data;

		strlcpy(compressdev->data->name, name,
			RTE_COMPRESSDEV_NAME_MAX_LEN);

		compressdev->data->dev_id = dev_id;
		compressdev->data->socket_id = socket_id;
		compressdev->data->dev_started = 0;

		compressdev->attached = RTE_COMPRESSDEV_ATTACHED;

		compressdev_globals.nb_devs++;
	}

	return compressdev;
}

int
rte_compressdev_pmd_release_device(struct rte_compressdev *compressdev)
{
	int ret;

	if (compressdev == NULL)
		return -EINVAL;

	/* Close device only if device operations have been set */
	if (compressdev->dev_ops) {
		ret = rte_compressdev_close(compressdev->data->dev_id);
		if (ret < 0)
			return ret;
	}

	compressdev->attached = RTE_COMPRESSDEV_DETACHED;
	compressdev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_compressdev_queue_pair_count(uint8_t dev_id)
{
	struct rte_compressdev *dev;

	dev = &rte_comp_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

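/*
 * Allocate (first configuration) or resize (reconfiguration) the array of
 * queue-pair pointers; on shrink, excess queue pairs are released through
 * the PMD's queue_pair_release op before the array is reallocated.
 */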
static int
rte_compressdev_queue_pairs_config(struct rte_compressdev *dev,
		uint16_t nb_qpairs, int socket_id)
{
	struct rte_compressdev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		COMPRESSDEV_LOG(ERR, "invalid param: dev %p, nb_queues %u",
					dev, nb_qpairs);
		return -EINVAL;
	}

	COMPRESSDEV_LOG(DEBUG, "Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_compressdev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if ((dev_info.max_nb_queue_pairs != 0) &&
			(nb_qpairs > dev_info.max_nb_queue_pairs)) {
		COMPRESSDEV_LOG(ERR, "Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"compressdev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			COMPRESSDEV_LOG(ERR,
			"failed to get memory for qp meta data, nb_queues %u",
					nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			COMPRESSDEV_LOG(ERR,
			"failed to realloc qp meta data, nb_queues %u",
					nb_qpairs);
			return -(ENOMEM);
		}

		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

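/*
 * Release all queue pairs of a device through the PMD and free the
 * queue-pair pointer array.
 */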
static int
rte_compressdev_queue_pairs_release(struct rte_compressdev *dev)
{
	uint16_t num_qps, i;
	int ret;

	if (dev == NULL) {
		COMPRESSDEV_LOG(ERR, "invalid param: dev %p", dev);
		return -EINVAL;
	}

	num_qps = dev->data->nb_queue_pairs;

	if (num_qps == 0)
		return 0;

	COMPRESSDEV_LOG(DEBUG, "Free %d queue pairs on device %u",
			dev->data->nb_queue_pairs, dev->data->dev_id);

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

	for (i = 0; i < num_qps; i++) {
		ret = (*dev->dev_ops->queue_pair_release)(dev, i);
		if (ret < 0)
			return ret;
	}

	if (dev->data->queue_pairs != NULL)
		rte_free(dev->data->queue_pairs);
	dev->data->queue_pairs = NULL;
	dev->data->nb_queue_pairs = 0;

	return 0;
}

int
rte_compressdev_configure(uint8_t dev_id, struct rte_compressdev_config *config)
{
	struct rte_compressdev *dev;
	int diag;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_comp_devices[dev_id];

	if (dev->data->dev_started) {
		COMPRESSDEV_LOG(ERR,
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_compressdev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		COMPRESSDEV_LOG(ERR,
			"dev%d rte_compressdev_queue_pairs_config = %d",
			dev_id, diag);
		return diag;
	}

	return (*dev->dev_ops->dev_configure)(dev, config);
}

int
rte_compressdev_start(uint8_t dev_id)
{
	struct rte_compressdev *dev;
	int diag;

	COMPRESSDEV_LOG(DEBUG, "Start dev_id=%" PRIu8, dev_id);

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_comp_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		COMPRESSDEV_LOG(ERR,
		    "Device with dev_id=%" PRIu8 " already started", dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_compressdev_stop(uint8_t dev_id)
{
	struct rte_compressdev *dev;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_comp_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		COMPRESSDEV_LOG(ERR,
		    "Device with dev_id=%" PRIu8 " already stopped", dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	dev->data->dev_started = 0;
}

int
rte_compressdev_close(uint8_t dev_id)
{
	struct rte_compressdev *dev;
	int retval;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_comp_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		COMPRESSDEV_LOG(ERR, "Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* Free queue pairs memory */
	retval = rte_compressdev_queue_pairs_release(dev);

	if (retval < 0)
		return retval;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		uint32_t max_inflight_ops, int socket_id)
{
	struct rte_compressdev *dev;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_comp_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		COMPRESSDEV_LOG(ERR, "Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		COMPRESSDEV_LOG(ERR,
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (max_inflight_ops == 0) {
		COMPRESSDEV_LOG(ERR,
			"Invalid maximum number of inflight operations");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id,
			max_inflight_ops, socket_id);
}

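/*
 * Fast-path helpers: for performance, neither burst function validates
 * dev_id or qp_id, so callers must pass values configured beforehand.
 * A minimal polling loop might look like the sketch below (illustrative
 * only; "ops", "n" and the retry policy are hypothetical and not part of
 * this file):
 *
 *	uint16_t nb_enq = rte_compressdev_enqueue_burst(dev_id, 0, ops, n);
 *	uint16_t nb_deq = 0;
 *	while (nb_deq < nb_enq)
 *		nb_deq += rte_compressdev_dequeue_burst(dev_id, 0,
 *				ops + nb_deq, nb_enq - nb_deq);
 */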
uint16_t
rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct rte_compressdev *dev = &rte_comp_devices[dev_id];

	nb_ops = (*dev->dequeue_burst)
			(dev->data->queue_pairs[qp_id], ops, nb_ops);

	return nb_ops;
}

uint16_t
rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct rte_compressdev *dev = &rte_comp_devices[dev_id];

	return (*dev->enqueue_burst)(
			dev->data->queue_pairs[qp_id], ops, nb_ops);
}

int
rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats)
{
	struct rte_compressdev *dev;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		COMPRESSDEV_LOG(ERR, "Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_comp_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_compressdev_stats_reset(uint8_t dev_id)
{
	struct rte_compressdev *dev;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_comp_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info)
{
	struct rte_compressdev *dev;

	if (dev_id >= compressdev_globals.nb_devs) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_comp_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_compressdev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
}

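/*
 * Create a PMD-private transform from the generic xform description; the
 * resulting opaque handle is what the application later attaches to its
 * operations.
 */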
int
rte_compressdev_private_xform_create(uint8_t dev_id,
		const struct rte_comp_xform *xform,
		void **priv_xform)
{
	struct rte_compressdev *dev;
	int ret;

	dev = rte_compressdev_get_dev(dev_id);

	if (xform == NULL || priv_xform == NULL || dev == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->private_xform_create, -ENOTSUP);
	ret = (*dev->dev_ops->private_xform_create)(dev, xform, priv_xform);
	if (ret < 0) {
		COMPRESSDEV_LOG(ERR,
			"dev_id %d failed to create private_xform: err=%d",
			dev_id, ret);
		return ret;
	}

	return 0;
}

int
rte_compressdev_private_xform_free(uint8_t dev_id, void *priv_xform)
{
	struct rte_compressdev *dev;
	int ret;

	dev = rte_compressdev_get_dev(dev_id);

	if (dev == NULL || priv_xform == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->private_xform_free, -ENOTSUP);
	ret = dev->dev_ops->private_xform_free(dev, priv_xform);
	if (ret < 0) {
		COMPRESSDEV_LOG(ERR,
			"dev_id %d failed to free private xform: err=%d",
			dev_id, ret);
		return ret;
	}

	return 0;
}

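/*
 * Create a PMD stream for stateful (multi-operation) compression or
 * decompression based on the given xform.
 */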
int
rte_compressdev_stream_create(uint8_t dev_id,
		const struct rte_comp_xform *xform,
		void **stream)
{
	struct rte_compressdev *dev;
	int ret;

	dev = rte_compressdev_get_dev(dev_id);

	if (xform == NULL || dev == NULL || stream == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stream_create, -ENOTSUP);
	ret = (*dev->dev_ops->stream_create)(dev, xform, stream);
	if (ret < 0) {
		COMPRESSDEV_LOG(ERR,
			"dev_id %d failed to create stream: err=%d",
			dev_id, ret);
		return ret;
	}

	return 0;
}

int
rte_compressdev_stream_free(uint8_t dev_id, void *stream)
{
	struct rte_compressdev *dev;
	int ret;

	dev = rte_compressdev_get_dev(dev_id);

	if (dev == NULL || stream == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stream_free, -ENOTSUP);
	ret = dev->dev_ops->stream_free(dev, stream);
	if (ret < 0) {
		COMPRESSDEV_LOG(ERR,
			"dev_id %d failed to free stream: err=%d",
			dev_id, ret);
		return ret;
	}

	return 0;
}

const char *
rte_compressdev_name_get(uint8_t dev_id)
{
	struct rte_compressdev *dev = rte_compressdev_get_dev(dev_id);

	if (dev == NULL)
		return NULL;

	return dev->data->name;
}

RTE_LOG_REGISTER(compressdev_logtype, lib.compressdev, NOTICE);