/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <string.h>

#include <rte_memzone.h>
#include <rte_string_fns.h>

#include "rte_regexdev.h"
#include "rte_regexdev_core.h"
#include "rte_regexdev_driver.h"

static const char *MZ_RTE_REGEXDEV_DATA = "rte_regexdev_data";
struct rte_regexdev rte_regex_devices[RTE_MAX_REGEXDEV_DEVS];
/* Shared memory between primary and secondary processes. */
static struct {
	struct rte_regexdev_data data[RTE_MAX_REGEXDEV_DEVS];
} *rte_regexdev_shared_data;

int rte_regexdev_logtype;

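/* Find the first unused slot in the RegEx device array. */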
static uint16_t
regexdev_find_free_dev(void)
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_REGEXDEV_DEVS; i++) {
		if (rte_regex_devices[i].state == RTE_REGEXDEV_UNUSED)
			return i;
	}
	return RTE_MAX_REGEXDEV_DEVS;
}

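/* Return the device already registered under the given name, if any. */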
static struct rte_regexdev*
regexdev_allocated(const char *name)
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_REGEXDEV_DEVS; i++) {
		if (rte_regex_devices[i].state != RTE_REGEXDEV_UNUSED)
			if (!strcmp(name, rte_regex_devices[i].data->dev_name))
				return &rte_regex_devices[i];
	}
	return NULL;
}

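/* Reserve the memzone for the shared RegEx device data, if not done yet. */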
static int
regexdev_shared_data_prepare(void)
{
	const unsigned int flags = 0;
	const struct rte_memzone *mz;

	if (rte_regexdev_shared_data == NULL) {
		/* Allocate port data and ownership shared memory. */
		mz = rte_memzone_reserve(MZ_RTE_REGEXDEV_DATA,
					 sizeof(*rte_regexdev_shared_data),
					 rte_socket_id(), flags);
		if (mz == NULL)
			return -ENOMEM;

		rte_regexdev_shared_data = mz->addr;
		memset(rte_regexdev_shared_data->data, 0,
		       sizeof(rte_regexdev_shared_data->data));
	}
	return 0;
}

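/* Validate a RegEx device name and return its length, or a negative errno. */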
static int
regexdev_check_name(const char *name)
{
	size_t name_len;

	if (name == NULL) {
		RTE_REGEXDEV_LOG(ERR, "Name can't be NULL\n");
		return -EINVAL;
	}
	name_len = strnlen(name, RTE_REGEXDEV_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_REGEXDEV_LOG(ERR, "Zero length RegEx device name\n");
		return -EINVAL;
	}
	if (name_len >= RTE_REGEXDEV_NAME_MAX_LEN) {
		RTE_REGEXDEV_LOG(ERR, "RegEx device name is too long\n");
		return -EINVAL;
	}
	return (int)name_len;
}

struct rte_regexdev *
rte_regexdev_register(const char *name)
{
	uint16_t dev_id;
	int name_len;
	struct rte_regexdev *dev;

	name_len = regexdev_check_name(name);
	if (name_len < 0)
		return NULL;
	dev = regexdev_allocated(name);
	if (dev != NULL) {
		RTE_REGEXDEV_LOG(ERR, "RegEx device already allocated\n");
		return NULL;
	}
	dev_id = regexdev_find_free_dev();
	if (dev_id == RTE_MAX_REGEXDEV_DEVS) {
		RTE_REGEXDEV_LOG
			(ERR, "Reached maximum number of RegEx devices\n");
		return NULL;
	}
	if (regexdev_shared_data_prepare() < 0) {
		RTE_REGEXDEV_LOG(ERR, "Cannot allocate RegEx shared data\n");
		return NULL;
	}

	dev = &rte_regex_devices[dev_id];
	dev->state = RTE_REGEXDEV_REGISTERED;
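	/* Attach or reset the shared data slot for this device ID. */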
	if (dev->data == NULL)
		dev->data = &rte_regexdev_shared_data->data[dev_id];
	else
		memset(dev->data, 0, sizeof(*dev->data));
	dev->data->dev_id = dev_id;
	strlcpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
	return dev;
}

void
rte_regexdev_unregister(struct rte_regexdev *dev)
{
	dev->state = RTE_REGEXDEV_UNUSED;
}

struct rte_regexdev *
rte_regexdev_get_device_by_name(const char *name)
{
	if (regexdev_check_name(name) < 0)
		return NULL;
	return regexdev_allocated(name);
}

uint8_t
rte_regexdev_count(void)
{
	int i;
	int count = 0;

	for (i = 0; i < RTE_MAX_REGEXDEV_DEVS; i++) {
		if (rte_regex_devices[i].state != RTE_REGEXDEV_UNUSED)
			count++;
	}
	return count;
}

int
rte_regexdev_get_dev_id(const char *name)
{
	int i;
	int id = -EINVAL;

	if (name == NULL)
		return -EINVAL;
	for (i = 0; i < RTE_MAX_REGEXDEV_DEVS; i++) {
		if (rte_regex_devices[i].state != RTE_REGEXDEV_UNUSED)
			if (!strcmp(name, rte_regex_devices[i].data->dev_name)) {
				id = rte_regex_devices[i].data->dev_id;
				break;
			}
	}
	return id;
}

int
rte_regexdev_is_valid_dev(uint16_t dev_id)
{
	if (dev_id >= RTE_MAX_REGEXDEV_DEVS ||
	    rte_regex_devices[dev_id].state != RTE_REGEXDEV_READY)
		return 0;
	return 1;
}

static int
regexdev_info_get(uint8_t dev_id, struct rte_regexdev_info *dev_info)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	if (dev_info == NULL)
		return -EINVAL;
	dev = &rte_regex_devices[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
	return (*dev->dev_ops->dev_info_get)(dev, dev_info);
}

int
rte_regexdev_info_get(uint8_t dev_id, struct rte_regexdev_info *dev_info)
{
	return regexdev_info_get(dev_id, dev_info);
}

int
rte_regexdev_configure(uint8_t dev_id, const struct rte_regexdev_config *cfg)
{
	struct rte_regexdev *dev;
	struct rte_regexdev_info dev_info;
	int ret;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	if (cfg == NULL)
		return -EINVAL;
	dev = &rte_regex_devices[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
	if (dev->data->dev_started) {
		RTE_REGEXDEV_LOG
			(ERR, "Dev %u must be stopped to allow configuration\n",
			 dev_id);
		return -EBUSY;
	}
	ret = regexdev_info_get(dev_id, &dev_info);
	if (ret < 0)
		return ret;
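	/* Reject configuration flags not present in the reported capabilities. */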
	if ((cfg->dev_cfg_flags & RTE_REGEXDEV_CFG_CROSS_BUFFER_SCAN_F) &&
	    !(dev_info.regexdev_capa & RTE_REGEXDEV_SUPP_CROSS_BUFFER_F)) {
		RTE_REGEXDEV_LOG(ERR,
				 "Dev %u doesn't support cross buffer scan\n",
				 dev_id);
		return -EINVAL;
	}
	if ((cfg->dev_cfg_flags & RTE_REGEXDEV_CFG_MATCH_AS_END_F) &&
	    !(dev_info.regexdev_capa & RTE_REGEXDEV_SUPP_MATCH_AS_END_F)) {
		RTE_REGEXDEV_LOG(ERR,
				 "Dev %u doesn't support match as end\n",
				 dev_id);
		return -EINVAL;
	}
	if ((cfg->dev_cfg_flags & RTE_REGEXDEV_CFG_MATCH_ALL_F) &&
	    !(dev_info.regexdev_capa & RTE_REGEXDEV_SUPP_MATCH_ALL_F)) {
		RTE_REGEXDEV_LOG(ERR,
				 "Dev %u doesn't support match all\n",
				 dev_id);
		return -EINVAL;
	}
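	/* Check the requested resource counts against the device limits. */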
	if (cfg->nb_groups == 0) {
		RTE_REGEXDEV_LOG(ERR, "Dev %u num of groups must be > 0\n",
				 dev_id);
		return -EINVAL;
	}
	if (cfg->nb_groups > dev_info.max_groups) {
		RTE_REGEXDEV_LOG(ERR, "Dev %u num of groups %d > %d\n",
				 dev_id, cfg->nb_groups, dev_info.max_groups);
		return -EINVAL;
	}
	if (cfg->nb_max_matches == 0) {
		RTE_REGEXDEV_LOG(ERR, "Dev %u num of matches must be > 0\n",
				 dev_id);
		return -EINVAL;
	}
	if (cfg->nb_max_matches > dev_info.max_matches) {
		RTE_REGEXDEV_LOG(ERR, "Dev %u num of matches %d > %d\n",
				 dev_id, cfg->nb_max_matches,
				 dev_info.max_matches);
		return -EINVAL;
	}
	if (cfg->nb_queue_pairs == 0) {
		RTE_REGEXDEV_LOG(ERR, "Dev %u num of queues must be > 0\n",
				 dev_id);
		return -EINVAL;
	}
	if (cfg->nb_queue_pairs > dev_info.max_queue_pairs) {
		RTE_REGEXDEV_LOG(ERR, "Dev %u num of queues %d > %d\n",
				 dev_id, cfg->nb_queue_pairs,
				 dev_info.max_queue_pairs);
		return -EINVAL;
	}
	if (cfg->nb_rules_per_group == 0) {
		RTE_REGEXDEV_LOG(ERR,
				 "Dev %u num of rules per group must be > 0\n",
				 dev_id);
		return -EINVAL;
	}
	if (cfg->nb_rules_per_group > dev_info.max_rules_per_group) {
		RTE_REGEXDEV_LOG(ERR,
				 "Dev %u num of rules per group %d > %d\n",
				 dev_id, cfg->nb_rules_per_group,
				 dev_info.max_rules_per_group);
		return -EINVAL;
	}
	ret = (*dev->dev_ops->dev_configure)(dev, cfg);
	if (ret == 0)
		dev->data->dev_conf = *cfg;
	return ret;
}

int
rte_regexdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
			      const struct rte_regexdev_qp_conf *qp_conf)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_qp_setup, -ENOTSUP);
	if (dev->data->dev_started) {
		RTE_REGEXDEV_LOG
			(ERR, "Dev %u must be stopped to allow configuration\n",
			 dev_id);
		return -EBUSY;
	}
	if (queue_pair_id >= dev->data->dev_conf.nb_queue_pairs) {
		RTE_REGEXDEV_LOG(ERR,
				 "Dev %u invalid queue %d > %d\n",
				 dev_id, queue_pair_id,
				 dev->data->dev_conf.nb_queue_pairs);
		return -EINVAL;
	}
	return (*dev->dev_ops->dev_qp_setup)(dev, queue_pair_id, qp_conf);
}

int
rte_regexdev_start(uint8_t dev_id)
{
	struct rte_regexdev *dev;
	int ret;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
	ret = (*dev->dev_ops->dev_start)(dev);
	if (ret == 0)
		dev->data->dev_started = 1;
	return ret;
}

int
rte_regexdev_stop(uint8_t dev_id)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);
	(*dev->dev_ops->dev_stop)(dev);
	dev->data->dev_started = 0;
	return 0;
}

int
rte_regexdev_close(uint8_t dev_id)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	(*dev->dev_ops->dev_close)(dev);
	dev->data->dev_started = 0;
	dev->state = RTE_REGEXDEV_UNUSED;
	return 0;
}

int
rte_regexdev_attr_get(uint8_t dev_id, enum rte_regexdev_attr_id attr_id,
		      void *attr_value)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_attr_get, -ENOTSUP);
	if (attr_value == NULL) {
		RTE_REGEXDEV_LOG(ERR, "Dev %d attribute value can't be NULL\n",
				 dev_id);
		return -EINVAL;
	}
	return (*dev->dev_ops->dev_attr_get)(dev, attr_id, attr_value);
}

int
rte_regexdev_attr_set(uint8_t dev_id, enum rte_regexdev_attr_id attr_id,
		      const void *attr_value)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_attr_set, -ENOTSUP);
	if (attr_value == NULL) {
		RTE_REGEXDEV_LOG(ERR, "Dev %d attribute value can't be NULL\n",
				 dev_id);
		return -EINVAL;
	}
	return (*dev->dev_ops->dev_attr_set)(dev, attr_id, attr_value);
}

int
rte_regexdev_rule_db_update(uint8_t dev_id,
			    const struct rte_regexdev_rule *rules,
			    uint32_t nb_rules)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_rule_db_update, -ENOTSUP);
	if (rules == NULL) {
		RTE_REGEXDEV_LOG(ERR, "Dev %d rules can't be NULL\n",
				 dev_id);
		return -EINVAL;
	}
	return (*dev->dev_ops->dev_rule_db_update)(dev, rules, nb_rules);
}

int
rte_regexdev_rule_db_compile_activate(uint8_t dev_id)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_rule_db_compile_activate,
				-ENOTSUP);
	return (*dev->dev_ops->dev_rule_db_compile_activate)(dev);
}

int
rte_regexdev_rule_db_import(uint8_t dev_id, const char *rule_db,
			    uint32_t rule_db_len)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_db_import,
				-ENOTSUP);
	if (rule_db == NULL) {
		RTE_REGEXDEV_LOG(ERR, "Dev %d rules can't be NULL\n",
				 dev_id);
		return -EINVAL;
	}
	return (*dev->dev_ops->dev_db_import)(dev, rule_db, rule_db_len);
}

int
rte_regexdev_rule_db_export(uint8_t dev_id, char *rule_db)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_db_export,
				-ENOTSUP);
	return (*dev->dev_ops->dev_db_export)(dev, rule_db);
}

int
rte_regexdev_xstats_names_get(uint8_t dev_id,
			      struct rte_regexdev_xstats_map *xstats_map)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_xstats_names_get,
				-ENOTSUP);
	if (xstats_map == NULL) {
		RTE_REGEXDEV_LOG(ERR, "Dev %d xstats map can't be NULL\n",
				 dev_id);
		return -EINVAL;
	}
	return (*dev->dev_ops->dev_xstats_names_get)(dev, xstats_map);
}

int
rte_regexdev_xstats_get(uint8_t dev_id, const uint16_t *ids,
			uint64_t *values, uint16_t n)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_xstats_get, -ENOTSUP);
	if (ids == NULL) {
		RTE_REGEXDEV_LOG(ERR, "Dev %d ids can't be NULL\n", dev_id);
		return -EINVAL;
	}
	if (values == NULL) {
		RTE_REGEXDEV_LOG(ERR, "Dev %d values can't be NULL\n", dev_id);
		return -EINVAL;
	}
	return (*dev->dev_ops->dev_xstats_get)(dev, ids, values, n);
}

int
rte_regexdev_xstats_by_name_get(uint8_t dev_id, const char *name,
				uint16_t *id, uint64_t *value)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_xstats_by_name_get,
				-ENOTSUP);
	if (name == NULL) {
		RTE_REGEXDEV_LOG(ERR, "Dev %d name can't be NULL\n", dev_id);
		return -EINVAL;
	}
	if (id == NULL) {
		RTE_REGEXDEV_LOG(ERR, "Dev %d id can't be NULL\n", dev_id);
		return -EINVAL;
	}
	if (value == NULL) {
		RTE_REGEXDEV_LOG(ERR, "Dev %d value can't be NULL\n", dev_id);
		return -EINVAL;
	}
	return (*dev->dev_ops->dev_xstats_by_name_get)(dev, name, id, value);
}

int
rte_regexdev_xstats_reset(uint8_t dev_id, const uint16_t *ids,
			  uint16_t nb_ids)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_xstats_reset, -ENOTSUP);
	if (ids == NULL) {
		RTE_REGEXDEV_LOG(ERR, "Dev %d ids can't be NULL\n", dev_id);
		return -EINVAL;
	}
	return (*dev->dev_ops->dev_xstats_reset)(dev, ids, nb_ids);
}

int
rte_regexdev_selftest(uint8_t dev_id)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
	return (*dev->dev_ops->dev_selftest)(dev);
}

int
rte_regexdev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_dump, -ENOTSUP);
	if (f == NULL) {
		RTE_REGEXDEV_LOG(ERR, "Dev %d file can't be NULL\n", dev_id);
		return -EINVAL;
	}
	return (*dev->dev_ops->dev_dump)(dev, f);
}