1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
3 * Copyright 2020 Mellanox Technologies, Ltd
4 */
5
6 #include <string.h>
7
8 #include <rte_memory.h>
9 #include <rte_memcpy.h>
10 #include <rte_memzone.h>
11 #include <rte_string_fns.h>
12
13 #include "rte_regexdev.h"
14 #include "rte_regexdev_core.h"
15 #include "rte_regexdev_driver.h"
16
/* Name of the memzone that backs the shared device-data table below. */
static const char *MZ_RTE_REGEXDEV_DATA = "rte_regexdev_data";
/* Per-process device table, indexed by device id. */
struct rte_regexdev rte_regex_devices[RTE_MAX_REGEXDEV_DEVS];
/* Shared memory between primary and secondary processes. */
static struct {
	struct rte_regexdev_data data[RTE_MAX_REGEXDEV_DEVS];
} *rte_regexdev_shared_data;

/* Log type used by RTE_REGEXDEV_LOG (registration not shown in this file). */
int rte_regexdev_logtype;
25
26 static uint16_t
regexdev_find_free_dev(void)27 regexdev_find_free_dev(void)
28 {
29 uint16_t i;
30
31 for (i = 0; i < RTE_MAX_REGEXDEV_DEVS; i++) {
32 if (rte_regex_devices[i].state == RTE_REGEXDEV_UNUSED)
33 return i;
34 }
35 return RTE_MAX_REGEXDEV_DEVS;
36 }
37
38 static struct rte_regexdev*
regexdev_allocated(const char * name)39 regexdev_allocated(const char *name)
40 {
41 uint16_t i;
42
43 for (i = 0; i < RTE_MAX_REGEXDEV_DEVS; i++) {
44 if (rte_regex_devices[i].state != RTE_REGEXDEV_UNUSED)
45 if (!strcmp(name, rte_regex_devices[i].data->dev_name))
46 return &rte_regex_devices[i];
47 }
48 return NULL;
49 }
50
51 static int
regexdev_shared_data_prepare(void)52 regexdev_shared_data_prepare(void)
53 {
54 const unsigned int flags = 0;
55 const struct rte_memzone *mz;
56
57 if (rte_regexdev_shared_data == NULL) {
58 /* Allocate port data and ownership shared memory. */
59 mz = rte_memzone_reserve(MZ_RTE_REGEXDEV_DATA,
60 sizeof(*rte_regexdev_shared_data),
61 rte_socket_id(), flags);
62 if (mz == NULL)
63 return -ENOMEM;
64
65 rte_regexdev_shared_data = mz->addr;
66 memset(rte_regexdev_shared_data->data, 0,
67 sizeof(rte_regexdev_shared_data->data));
68 }
69 return 0;
70 }
71
72 static int
regexdev_check_name(const char * name)73 regexdev_check_name(const char *name)
74 {
75 size_t name_len;
76
77 if (name == NULL) {
78 RTE_REGEXDEV_LOG(ERR, "Name can't be NULL\n");
79 return -EINVAL;
80 }
81 name_len = strnlen(name, RTE_REGEXDEV_NAME_MAX_LEN);
82 if (name_len == 0) {
83 RTE_REGEXDEV_LOG(ERR, "Zero length RegEx device name\n");
84 return -EINVAL;
85 }
86 if (name_len >= RTE_REGEXDEV_NAME_MAX_LEN) {
87 RTE_REGEXDEV_LOG(ERR, "RegEx device name is too long\n");
88 return -EINVAL;
89 }
90 return (int)name_len;
91
92 }
93
94 struct rte_regexdev *
rte_regexdev_register(const char * name)95 rte_regexdev_register(const char *name)
96 {
97 uint16_t dev_id;
98 int name_len;
99 struct rte_regexdev *dev;
100
101 name_len = regexdev_check_name(name);
102 if (name_len < 0)
103 return NULL;
104 dev = regexdev_allocated(name);
105 if (dev != NULL) {
106 RTE_REGEXDEV_LOG(ERR, "RegEx device already allocated\n");
107 return NULL;
108 }
109 dev_id = regexdev_find_free_dev();
110 if (dev_id == RTE_MAX_REGEXDEV_DEVS) {
111 RTE_REGEXDEV_LOG
112 (ERR, "Reached maximum number of RegEx devices\n");
113 return NULL;
114 }
115 if (regexdev_shared_data_prepare() < 0) {
116 RTE_REGEXDEV_LOG(ERR, "Cannot allocate RegEx shared data\n");
117 return NULL;
118 }
119
120 dev = &rte_regex_devices[dev_id];
121 dev->state = RTE_REGEXDEV_REGISTERED;
122 if (dev->data == NULL)
123 dev->data = &rte_regexdev_shared_data->data[dev_id];
124 else
125 memset(dev->data, 1, sizeof(*dev->data));
126 dev->data->dev_id = dev_id;
127 strlcpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
128 return dev;
129 }
130
131 void
rte_regexdev_unregister(struct rte_regexdev * dev)132 rte_regexdev_unregister(struct rte_regexdev *dev)
133 {
134 dev->state = RTE_REGEXDEV_UNUSED;
135 }
136
137 struct rte_regexdev *
rte_regexdev_get_device_by_name(const char * name)138 rte_regexdev_get_device_by_name(const char *name)
139 {
140 if (regexdev_check_name(name) < 0)
141 return NULL;
142 return regexdev_allocated(name);
143 }
144
145 uint8_t
rte_regexdev_count(void)146 rte_regexdev_count(void)
147 {
148 int i;
149 int count = 0;
150
151 for (i = 0; i < RTE_MAX_REGEXDEV_DEVS; i++) {
152 if (rte_regex_devices[i].state != RTE_REGEXDEV_UNUSED)
153 count++;
154 }
155 return count;
156 }
157
158 int
rte_regexdev_get_dev_id(const char * name)159 rte_regexdev_get_dev_id(const char *name)
160 {
161 int i;
162 int id = -EINVAL;
163
164 if (name == NULL)
165 return -EINVAL;
166 for (i = 0; i < RTE_MAX_REGEXDEV_DEVS; i++) {
167 if (rte_regex_devices[i].state != RTE_REGEXDEV_UNUSED)
168 if (strcmp(name, rte_regex_devices[i].data->dev_name)) {
169 id = rte_regex_devices[i].data->dev_id;
170 break;
171 }
172 }
173 return id;
174 }
175
176 int
rte_regexdev_is_valid_dev(uint16_t dev_id)177 rte_regexdev_is_valid_dev(uint16_t dev_id)
178 {
179 if (dev_id >= RTE_MAX_REGEXDEV_DEVS ||
180 rte_regex_devices[dev_id].state != RTE_REGEXDEV_READY)
181 return 0;
182 return 1;
183 }
184
185 static int
regexdev_info_get(uint8_t dev_id,struct rte_regexdev_info * dev_info)186 regexdev_info_get(uint8_t dev_id, struct rte_regexdev_info *dev_info)
187 {
188 struct rte_regexdev *dev;
189
190 RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
191 if (dev_info == NULL)
192 return -EINVAL;
193 dev = &rte_regex_devices[dev_id];
194 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
195 return (*dev->dev_ops->dev_info_get)(dev, dev_info);
196
197 }
198
/* Public wrapper around the internal regexdev_info_get() helper. */
int
rte_regexdev_info_get(uint8_t dev_id, struct rte_regexdev_info *dev_info)
{
	return regexdev_info_get(dev_id, dev_info);
}
204
/*
 * Configure a RegEx device before it is started.
 *
 * Validates the requested configuration against the driver-reported
 * capabilities and limits (fetched via regexdev_info_get()), then
 * forwards it to the driver's dev_configure callback.  On success the
 * accepted configuration is cached in dev->data->dev_conf (used later,
 * e.g. by rte_regexdev_queue_pair_setup() for bounds checks).
 *
 * @param dev_id
 *   Device id; must reference a READY device.
 * @param cfg
 *   Requested configuration; must not be NULL.
 * @return
 *   0 on success; -EINVAL on invalid id or out-of-range configuration,
 *   -EBUSY when the device is started, -ENOTSUP when the driver lacks
 *   the callback, or the driver callback's status.
 */
int
rte_regexdev_configure(uint8_t dev_id, const struct rte_regexdev_config *cfg)
{
	struct rte_regexdev *dev;
	struct rte_regexdev_info dev_info;
	int ret;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	if (cfg == NULL)
		return -EINVAL;
	dev = &rte_regex_devices[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
	/* Reconfiguration is only allowed while the device is stopped. */
	if (dev->data->dev_started) {
		RTE_REGEXDEV_LOG
			(ERR, "Dev %u must be stopped to allow configuration\n",
			 dev_id);
		return -EBUSY;
	}
	ret = regexdev_info_get(dev_id, &dev_info);
	if (ret < 0)
		return ret;
	/* Each requested optional feature flag must be backed by the
	 * corresponding capability bit reported by the driver. */
	if ((cfg->dev_cfg_flags & RTE_REGEXDEV_CFG_CROSS_BUFFER_SCAN_F) &&
	    !(dev_info.regexdev_capa & RTE_REGEXDEV_SUPP_CROSS_BUFFER_F)) {
		RTE_REGEXDEV_LOG(ERR,
				 "Dev %u doesn't support cross buffer scan\n",
				 dev_id);
		return -EINVAL;
	}
	if ((cfg->dev_cfg_flags & RTE_REGEXDEV_CFG_MATCH_AS_END_F) &&
	    !(dev_info.regexdev_capa & RTE_REGEXDEV_SUPP_MATCH_AS_END_F)) {
		RTE_REGEXDEV_LOG(ERR,
				 "Dev %u doesn't support match as end\n",
				 dev_id);
		return -EINVAL;
	}
	if ((cfg->dev_cfg_flags & RTE_REGEXDEV_CFG_MATCH_ALL_F) &&
	    !(dev_info.regexdev_capa & RTE_REGEXDEV_SUPP_MATCH_ALL_F)) {
		RTE_REGEXDEV_LOG(ERR,
				 "Dev %u doesn't support match all\n",
				 dev_id);
		return -EINVAL;
	}
	/* Numeric resources must be non-zero and within driver maxima. */
	if (cfg->nb_groups == 0) {
		RTE_REGEXDEV_LOG(ERR, "Dev %u num of groups must be > 0\n",
				 dev_id);
		return -EINVAL;
	}
	if (cfg->nb_groups > dev_info.max_groups) {
		RTE_REGEXDEV_LOG(ERR, "Dev %u num of groups %d > %d\n",
				 dev_id, cfg->nb_groups, dev_info.max_groups);
		return -EINVAL;
	}
	if (cfg->nb_max_matches == 0) {
		RTE_REGEXDEV_LOG(ERR, "Dev %u num of matches must be > 0\n",
				 dev_id);
		return -EINVAL;
	}
	if (cfg->nb_max_matches > dev_info.max_matches) {
		RTE_REGEXDEV_LOG(ERR, "Dev %u num of matches %d > %d\n",
				 dev_id, cfg->nb_max_matches,
				 dev_info.max_matches);
		return -EINVAL;
	}
	if (cfg->nb_queue_pairs == 0) {
		RTE_REGEXDEV_LOG(ERR, "Dev %u num of queues must be > 0\n",
				 dev_id);
		return -EINVAL;
	}
	if (cfg->nb_queue_pairs > dev_info.max_queue_pairs) {
		RTE_REGEXDEV_LOG(ERR, "Dev %u num of queues %d > %d\n",
				 dev_id, cfg->nb_queue_pairs,
				 dev_info.max_queue_pairs);
		return -EINVAL;
	}
	if (cfg->nb_rules_per_group == 0) {
		RTE_REGEXDEV_LOG(ERR,
				 "Dev %u num of rules per group must be > 0\n",
				 dev_id);
		return -EINVAL;
	}
	if (cfg->nb_rules_per_group > dev_info.max_rules_per_group) {
		RTE_REGEXDEV_LOG(ERR,
				 "Dev %u num of rules per group %d > %d\n",
				 dev_id, cfg->nb_rules_per_group,
				 dev_info.max_rules_per_group);
		return -EINVAL;
	}
	ret = (*dev->dev_ops->dev_configure)(dev, cfg);
	/* Cache the accepted configuration only when the driver took it. */
	if (ret == 0)
		dev->data->dev_conf = *cfg;
	return ret;
}
297
298 int
rte_regexdev_queue_pair_setup(uint8_t dev_id,uint16_t queue_pair_id,const struct rte_regexdev_qp_conf * qp_conf)299 rte_regexdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
300 const struct rte_regexdev_qp_conf *qp_conf)
301 {
302 struct rte_regexdev *dev;
303
304 RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
305 dev = &rte_regex_devices[dev_id];
306 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_qp_setup, -ENOTSUP);
307 if (dev->data->dev_started) {
308 RTE_REGEXDEV_LOG
309 (ERR, "Dev %u must be stopped to allow configuration\n",
310 dev_id);
311 return -EBUSY;
312 }
313 if (queue_pair_id >= dev->data->dev_conf.nb_queue_pairs) {
314 RTE_REGEXDEV_LOG(ERR,
315 "Dev %u invalid queue %d > %d\n",
316 dev_id, queue_pair_id,
317 dev->data->dev_conf.nb_queue_pairs);
318 return -EINVAL;
319 }
320 if (dev->data->dev_started) {
321 RTE_REGEXDEV_LOG
322 (ERR, "Dev %u must be stopped to allow configuration\n",
323 dev_id);
324 return -EBUSY;
325 }
326 return (*dev->dev_ops->dev_qp_setup)(dev, queue_pair_id, qp_conf);
327 }
328
329 int
rte_regexdev_start(uint8_t dev_id)330 rte_regexdev_start(uint8_t dev_id)
331 {
332 struct rte_regexdev *dev;
333 int ret;
334
335 RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
336 dev = &rte_regex_devices[dev_id];
337 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
338 ret = (*dev->dev_ops->dev_start)(dev);
339 if (ret == 0)
340 dev->data->dev_started = 1;
341 return ret;
342 }
343
344 int
rte_regexdev_stop(uint8_t dev_id)345 rte_regexdev_stop(uint8_t dev_id)
346 {
347 struct rte_regexdev *dev;
348
349 RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
350 dev = &rte_regex_devices[dev_id];
351 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);
352 (*dev->dev_ops->dev_stop)(dev);
353 dev->data->dev_started = 0;
354 return 0;
355 }
356
357 int
rte_regexdev_close(uint8_t dev_id)358 rte_regexdev_close(uint8_t dev_id)
359 {
360 struct rte_regexdev *dev;
361
362 RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
363 dev = &rte_regex_devices[dev_id];
364 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
365 (*dev->dev_ops->dev_close)(dev);
366 dev->data->dev_started = 0;
367 dev->state = RTE_REGEXDEV_UNUSED;
368 return 0;
369 }
370
371 int
rte_regexdev_attr_get(uint8_t dev_id,enum rte_regexdev_attr_id attr_id,void * attr_value)372 rte_regexdev_attr_get(uint8_t dev_id, enum rte_regexdev_attr_id attr_id,
373 void *attr_value)
374 {
375 struct rte_regexdev *dev;
376
377 RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
378 dev = &rte_regex_devices[dev_id];
379 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_attr_get, -ENOTSUP);
380 if (attr_value == NULL) {
381 RTE_REGEXDEV_LOG(ERR, "Dev %d attribute value can't be NULL\n",
382 dev_id);
383 return -EINVAL;
384 }
385 return (*dev->dev_ops->dev_attr_get)(dev, attr_id, attr_value);
386 }
387
388 int
rte_regexdev_attr_set(uint8_t dev_id,enum rte_regexdev_attr_id attr_id,const void * attr_value)389 rte_regexdev_attr_set(uint8_t dev_id, enum rte_regexdev_attr_id attr_id,
390 const void *attr_value)
391 {
392 struct rte_regexdev *dev;
393
394 RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
395 dev = &rte_regex_devices[dev_id];
396 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_attr_set, -ENOTSUP);
397 if (attr_value == NULL) {
398 RTE_REGEXDEV_LOG(ERR, "Dev %d attribute value can't be NULL\n",
399 dev_id);
400 return -EINVAL;
401 }
402 return (*dev->dev_ops->dev_attr_set)(dev, attr_id, attr_value);
403 }
404
405 int
rte_regexdev_rule_db_update(uint8_t dev_id,const struct rte_regexdev_rule * rules,uint32_t nb_rules)406 rte_regexdev_rule_db_update(uint8_t dev_id,
407 const struct rte_regexdev_rule *rules,
408 uint32_t nb_rules)
409 {
410 struct rte_regexdev *dev;
411
412 RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
413 dev = &rte_regex_devices[dev_id];
414 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_rule_db_update, -ENOTSUP);
415 if (rules == NULL) {
416 RTE_REGEXDEV_LOG(ERR, "Dev %d rules can't be NULL\n",
417 dev_id);
418 return -EINVAL;
419 }
420 return (*dev->dev_ops->dev_rule_db_update)(dev, rules, nb_rules);
421 }
422
423 int
rte_regexdev_rule_db_compile_activate(uint8_t dev_id)424 rte_regexdev_rule_db_compile_activate(uint8_t dev_id)
425 {
426 struct rte_regexdev *dev;
427
428 RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
429 dev = &rte_regex_devices[dev_id];
430 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_rule_db_compile_activate,
431 -ENOTSUP);
432 return (*dev->dev_ops->dev_rule_db_compile_activate)(dev);
433 }
434
435 int
rte_regexdev_rule_db_import(uint8_t dev_id,const char * rule_db,uint32_t rule_db_len)436 rte_regexdev_rule_db_import(uint8_t dev_id, const char *rule_db,
437 uint32_t rule_db_len)
438 {
439 struct rte_regexdev *dev;
440
441 RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
442 dev = &rte_regex_devices[dev_id];
443 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_db_import,
444 -ENOTSUP);
445 if (rule_db == NULL) {
446 RTE_REGEXDEV_LOG(ERR, "Dev %d rules can't be NULL\n",
447 dev_id);
448 return -EINVAL;
449 }
450 return (*dev->dev_ops->dev_db_import)(dev, rule_db, rule_db_len);
451 }
452
453 int
rte_regexdev_rule_db_export(uint8_t dev_id,char * rule_db)454 rte_regexdev_rule_db_export(uint8_t dev_id, char *rule_db)
455 {
456 struct rte_regexdev *dev;
457
458 RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
459 dev = &rte_regex_devices[dev_id];
460 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_db_export,
461 -ENOTSUP);
462 return (*dev->dev_ops->dev_db_export)(dev, rule_db);
463 }
464
465 int
rte_regexdev_xstats_names_get(uint8_t dev_id,struct rte_regexdev_xstats_map * xstats_map)466 rte_regexdev_xstats_names_get(uint8_t dev_id,
467 struct rte_regexdev_xstats_map *xstats_map)
468 {
469 struct rte_regexdev *dev;
470
471 RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
472 dev = &rte_regex_devices[dev_id];
473 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_xstats_names_get,
474 -ENOTSUP);
475 if (xstats_map == NULL) {
476 RTE_REGEXDEV_LOG(ERR, "Dev %d xstats map can't be NULL\n",
477 dev_id);
478 return -EINVAL;
479 }
480 return (*dev->dev_ops->dev_xstats_names_get)(dev, xstats_map);
481 }
482
483 int
rte_regexdev_xstats_get(uint8_t dev_id,const uint16_t * ids,uint64_t * values,uint16_t n)484 rte_regexdev_xstats_get(uint8_t dev_id, const uint16_t *ids,
485 uint64_t *values, uint16_t n)
486 {
487 struct rte_regexdev *dev;
488
489 RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
490 dev = &rte_regex_devices[dev_id];
491 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_xstats_get, -ENOTSUP);
492 if (ids == NULL) {
493 RTE_REGEXDEV_LOG(ERR, "Dev %d ids can't be NULL\n", dev_id);
494 return -EINVAL;
495 }
496 if (values == NULL) {
497 RTE_REGEXDEV_LOG(ERR, "Dev %d values can't be NULL\n", dev_id);
498 return -EINVAL;
499 }
500 return (*dev->dev_ops->dev_xstats_get)(dev, ids, values, n);
501 }
502
503 int
rte_regexdev_xstats_by_name_get(uint8_t dev_id,const char * name,uint16_t * id,uint64_t * value)504 rte_regexdev_xstats_by_name_get(uint8_t dev_id, const char *name,
505 uint16_t *id, uint64_t *value)
506 {
507 struct rte_regexdev *dev;
508
509 RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
510 dev = &rte_regex_devices[dev_id];
511 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_xstats_by_name_get,
512 -ENOTSUP);
513 if (name == NULL) {
514 RTE_REGEXDEV_LOG(ERR, "Dev %d name can't be NULL\n", dev_id);
515 return -EINVAL;
516 }
517 if (id == NULL) {
518 RTE_REGEXDEV_LOG(ERR, "Dev %d id can't be NULL\n", dev_id);
519 return -EINVAL;
520 }
521 if (value == NULL) {
522 RTE_REGEXDEV_LOG(ERR, "Dev %d value can't be NULL\n", dev_id);
523 return -EINVAL;
524 }
525 return (*dev->dev_ops->dev_xstats_by_name_get)(dev, name, id, value);
526 }
527
528 int
rte_regexdev_xstats_reset(uint8_t dev_id,const uint16_t * ids,uint16_t nb_ids)529 rte_regexdev_xstats_reset(uint8_t dev_id, const uint16_t *ids,
530 uint16_t nb_ids)
531 {
532 struct rte_regexdev *dev;
533
534 RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
535 dev = &rte_regex_devices[dev_id];
536 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_xstats_reset, -ENOTSUP);
537 if (ids == NULL) {
538 RTE_REGEXDEV_LOG(ERR, "Dev %d ids can't be NULL\n", dev_id);
539 return -EINVAL;
540 }
541 return (*dev->dev_ops->dev_xstats_reset)(dev, ids, nb_ids);
542 }
543
544 int
rte_regexdev_selftest(uint8_t dev_id)545 rte_regexdev_selftest(uint8_t dev_id)
546 {
547 struct rte_regexdev *dev;
548
549 RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
550 dev = &rte_regex_devices[dev_id];
551 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
552 return (*dev->dev_ops->dev_selftest)(dev);
553 }
554
555 int
rte_regexdev_dump(uint8_t dev_id,FILE * f)556 rte_regexdev_dump(uint8_t dev_id, FILE *f)
557 {
558 struct rte_regexdev *dev;
559
560 RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
561 dev = &rte_regex_devices[dev_id];
562 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_dump, -ENOTSUP);
563 if (f == NULL) {
564 RTE_REGEXDEV_LOG(ERR, "Dev %d file can't be NULL\n", dev_id);
565 return -EINVAL;
566 }
567 return (*dev->dev_ops->dev_dump)(dev, f);
568 }
569