1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
3 */
4
5 #include <rte_string_fns.h>
6 #include <rte_compat.h>
7 #include <rte_flow_classify.h>
8 #include "rte_flow_classify_parse.h"
9 #include <rte_flow_driver.h>
10 #include <rte_table_acl.h>
11 #include <stdbool.h>
12
/* Monotonically increasing ID assigned to each newly allocated classify rule. */
static uint32_t unique_id = 1;

/*
 * Table type used when dispatching rule adds.
 * NOTE(review): this is file-global rather than per-classifier — it appears
 * to be assigned outside this file (parse code); confirm with the code that
 * sets it before relying on multi-table behavior.
 */
enum rte_flow_classify_table_type table_type
	= RTE_FLOW_CLASSIFY_TABLE_TYPE_NONE;
17
/* Per-rule payload stored in the low-level table for each classify rule. */
struct rte_flow_classify_table_entry {
	/* meta-data for classify rule */
	uint32_t rule_id;

	/* Flow action */
	struct classify_action action;
};
25
/* One classifier table: ops vtable plus the opaque low-level table handle. */
struct rte_cls_table {
	/* Input parameters */
	struct rte_table_ops ops;	/* copied from creation params */
	uint32_t entry_size;		/* bytes per table entry */
	enum rte_flow_classify_table_type type;

	/* Handle to the low-level table object */
	void *h_table;
};
35
/* Maximum length of a classifier name, including the NUL terminator. */
#define RTE_FLOW_CLASSIFIER_MAX_NAME_SZ 256

/* Flow classifier instance: a named set of tables plus parse/lookup state. */
struct rte_flow_classifier {
	/* Input parameters */
	char name[RTE_FLOW_CLASSIFIER_MAX_NAME_SZ];
	int socket_id;			/* NUMA socket for allocations */

	/* Internal */
	/* ntuple_filter: scratch output of the last pattern parse */
	struct rte_eth_ntuple_filter ntuple_filter;

	/* classifier tables */
	struct rte_cls_table tables[RTE_FLOW_CLASSIFY_TABLE_MAX];
	uint32_t table_mask;		/* OR of the table types in use */
	uint32_t num_tables;		/* number of valid entries in tables[] */

	/* results of the most recent lookup (see flow_classifier_lookup) */
	uint16_t nb_pkts;
	struct rte_flow_classify_table_entry
		*entries[RTE_PORT_IN_BURST_SIZE_MAX];
} __rte_cache_aligned;
56
/*
 * Indices of the IPv4 5-tuple fields inside the ACL rule key.
 * The order must match the ACL field definitions used to create the table.
 */
enum {
	PROTO_FIELD_IPV4,
	SRC_FIELD_IPV4,
	DST_FIELD_IPV4,
	SRCP_FIELD_IPV4,
	DSTP_FIELD_IPV4,
	NUM_FIELDS_IPV4
};
65
/* Pre-built ACL keys kept with each rule so delete mirrors the add. */
struct acl_keys {
	struct rte_table_acl_rule_add_params key_add; /* add key */
	struct rte_table_acl_rule_delete_params key_del; /* delete key */
};
70
/* Tagged union of the supported match rule representations. */
struct classify_rules {
	enum rte_flow_classify_rule_type type;
	union {
		struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
	} u;
};
77
/* A classify rule as handed back to the caller by table_entry_add(). */
struct rte_flow_classify_rule {
	uint32_t id; /* unique ID of classify rule */
	enum rte_flow_classify_table_type tbl_type; /* rule table */
	struct classify_rules rules; /* union of rules */
	union {
		struct acl_keys key;
	} u;
	int key_found; /* rule key found in table */
	struct rte_flow_classify_table_entry entry; /* rule meta data */
	void *entry_ptr; /* handle to the table entry for rule meta data */
};
89
90 int
rte_flow_classify_validate(struct rte_flow_classifier * cls,const struct rte_flow_attr * attr,const struct rte_flow_item pattern[],const struct rte_flow_action actions[],struct rte_flow_error * error)91 rte_flow_classify_validate(
92 struct rte_flow_classifier *cls,
93 const struct rte_flow_attr *attr,
94 const struct rte_flow_item pattern[],
95 const struct rte_flow_action actions[],
96 struct rte_flow_error *error)
97 {
98 struct rte_flow_item *items;
99 parse_filter_t parse_filter;
100 uint32_t item_num = 0;
101 uint32_t i = 0;
102 int ret;
103
104 if (error == NULL)
105 return -EINVAL;
106
107 if (cls == NULL) {
108 RTE_FLOW_CLASSIFY_LOG(ERR,
109 "%s: rte_flow_classifier parameter is NULL\n",
110 __func__);
111 return -EINVAL;
112 }
113
114 if (!attr) {
115 rte_flow_error_set(error, EINVAL,
116 RTE_FLOW_ERROR_TYPE_ATTR,
117 NULL, "NULL attribute.");
118 return -EINVAL;
119 }
120
121 if (!pattern) {
122 rte_flow_error_set(error,
123 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
124 NULL, "NULL pattern.");
125 return -EINVAL;
126 }
127
128 if (!actions) {
129 rte_flow_error_set(error, EINVAL,
130 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
131 NULL, "NULL action.");
132 return -EINVAL;
133 }
134
135 memset(&cls->ntuple_filter, 0, sizeof(cls->ntuple_filter));
136
137 /* Get the non-void item number of pattern */
138 while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
139 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
140 item_num++;
141 i++;
142 }
143 item_num++;
144
145 items = malloc(item_num * sizeof(struct rte_flow_item));
146 if (!items) {
147 rte_flow_error_set(error, ENOMEM,
148 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
149 NULL, "No memory for pattern items.");
150 return -ENOMEM;
151 }
152
153 memset(items, 0, item_num * sizeof(struct rte_flow_item));
154 classify_pattern_skip_void_item(items, pattern);
155
156 parse_filter = classify_find_parse_filter_func(items);
157 if (!parse_filter) {
158 rte_flow_error_set(error, EINVAL,
159 RTE_FLOW_ERROR_TYPE_ITEM,
160 pattern, "Unsupported pattern");
161 free(items);
162 return -EINVAL;
163 }
164
165 ret = parse_filter(attr, items, actions, &cls->ntuple_filter, error);
166 free(items);
167 return ret;
168 }
169
170
/*
 * Split a 32-bit IPv4 address into its four bytes:
 * *a = most significant byte ... *d = least significant byte.
 */
#define uint32_t_to_char(ip, a, b, c, d) do {\
	*a = (unsigned char)(ip >> 24 & 0xff);\
	*b = (unsigned char)(ip >> 16 & 0xff);\
	*c = (unsigned char)(ip >> 8 & 0xff);\
	*d = (unsigned char)(ip & 0xff);\
	} while (0)
177
/* Debug dump of an ACL add key: proto, src/dst IP, src/dst port, priority. */
static inline void
print_acl_ipv4_key_add(struct rte_table_acl_rule_add_params *key)
{
	unsigned char a, b, c, d;

	printf("%s: 0x%02hhx/0x%hhx ", __func__,
		key->field_value[PROTO_FIELD_IPV4].value.u8,
		key->field_value[PROTO_FIELD_IPV4].mask_range.u8);

	uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
			&a, &b, &c, &d);
	printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
			key->field_value[SRC_FIELD_IPV4].mask_range.u32);

	uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
			&a, &b, &c, &d);
	printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
			key->field_value[DST_FIELD_IPV4].mask_range.u32);

	printf("%hu : 0x%x %hu : 0x%x",
		key->field_value[SRCP_FIELD_IPV4].value.u16,
		key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
		key->field_value[DSTP_FIELD_IPV4].value.u16,
		key->field_value[DSTP_FIELD_IPV4].mask_range.u16);

	printf(" priority: 0x%x\n", key->priority);
}
205
/* Debug dump of an ACL delete key (same layout as the add key, no priority). */
static inline void
print_acl_ipv4_key_delete(struct rte_table_acl_rule_delete_params *key)
{
	unsigned char a, b, c, d;

	printf("%s: 0x%02hhx/0x%hhx ", __func__,
		key->field_value[PROTO_FIELD_IPV4].value.u8,
		key->field_value[PROTO_FIELD_IPV4].mask_range.u8);

	uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
			&a, &b, &c, &d);
	printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
			key->field_value[SRC_FIELD_IPV4].mask_range.u32);

	uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
			&a, &b, &c, &d);
	printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
			key->field_value[DST_FIELD_IPV4].mask_range.u32);

	printf("%hu : 0x%x %hu : 0x%x\n",
		key->field_value[SRCP_FIELD_IPV4].value.u16,
		key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
		key->field_value[DSTP_FIELD_IPV4].value.u16,
		key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
}
231
232 static int
rte_flow_classifier_check_params(struct rte_flow_classifier_params * params)233 rte_flow_classifier_check_params(struct rte_flow_classifier_params *params)
234 {
235 if (params == NULL) {
236 RTE_FLOW_CLASSIFY_LOG(ERR,
237 "%s: Incorrect value for parameter params\n", __func__);
238 return -EINVAL;
239 }
240
241 /* name */
242 if (params->name == NULL) {
243 RTE_FLOW_CLASSIFY_LOG(ERR,
244 "%s: Incorrect value for parameter name\n", __func__);
245 return -EINVAL;
246 }
247
248 /* socket */
249 if (params->socket_id < 0) {
250 RTE_FLOW_CLASSIFY_LOG(ERR,
251 "%s: Incorrect value for parameter socket_id\n",
252 __func__);
253 return -EINVAL;
254 }
255
256 return 0;
257 }
258
259 struct rte_flow_classifier *
rte_flow_classifier_create(struct rte_flow_classifier_params * params)260 rte_flow_classifier_create(struct rte_flow_classifier_params *params)
261 {
262 struct rte_flow_classifier *cls;
263 int ret;
264
265 /* Check input parameters */
266 ret = rte_flow_classifier_check_params(params);
267 if (ret != 0) {
268 RTE_FLOW_CLASSIFY_LOG(ERR,
269 "%s: flow classifier params check failed (%d)\n",
270 __func__, ret);
271 return NULL;
272 }
273
274 /* Allocate memory for the flow classifier */
275 cls = rte_zmalloc_socket("FLOW_CLASSIFIER",
276 sizeof(struct rte_flow_classifier),
277 RTE_CACHE_LINE_SIZE, params->socket_id);
278
279 if (cls == NULL) {
280 RTE_FLOW_CLASSIFY_LOG(ERR,
281 "%s: flow classifier memory allocation failed\n",
282 __func__);
283 return NULL;
284 }
285
286 /* Save input parameters */
287 strlcpy(cls->name, params->name, RTE_FLOW_CLASSIFIER_MAX_NAME_SZ);
288
289 cls->socket_id = params->socket_id;
290
291 return cls;
292 }
293
294 static void
rte_flow_classify_table_free(struct rte_cls_table * table)295 rte_flow_classify_table_free(struct rte_cls_table *table)
296 {
297 if (table->ops.f_free != NULL)
298 table->ops.f_free(table->h_table);
299 }
300
301 int
rte_flow_classifier_free(struct rte_flow_classifier * cls)302 rte_flow_classifier_free(struct rte_flow_classifier *cls)
303 {
304 uint32_t i;
305
306 /* Check input parameters */
307 if (cls == NULL) {
308 RTE_FLOW_CLASSIFY_LOG(ERR,
309 "%s: rte_flow_classifier parameter is NULL\n",
310 __func__);
311 return -EINVAL;
312 }
313
314 /* Free tables */
315 for (i = 0; i < cls->num_tables; i++) {
316 struct rte_cls_table *table = &cls->tables[i];
317
318 rte_flow_classify_table_free(table);
319 }
320
321 /* Free flow classifier memory */
322 rte_free(cls);
323
324 return 0;
325 }
326
327 static int
rte_table_check_params(struct rte_flow_classifier * cls,struct rte_flow_classify_table_params * params)328 rte_table_check_params(struct rte_flow_classifier *cls,
329 struct rte_flow_classify_table_params *params)
330 {
331 if (cls == NULL) {
332 RTE_FLOW_CLASSIFY_LOG(ERR,
333 "%s: flow classifier parameter is NULL\n",
334 __func__);
335 return -EINVAL;
336 }
337 if (params == NULL) {
338 RTE_FLOW_CLASSIFY_LOG(ERR, "%s: params parameter is NULL\n",
339 __func__);
340 return -EINVAL;
341 }
342
343 /* ops */
344 if (params->ops == NULL) {
345 RTE_FLOW_CLASSIFY_LOG(ERR, "%s: params->ops is NULL\n",
346 __func__);
347 return -EINVAL;
348 }
349
350 if (params->ops->f_create == NULL) {
351 RTE_FLOW_CLASSIFY_LOG(ERR,
352 "%s: f_create function pointer is NULL\n", __func__);
353 return -EINVAL;
354 }
355
356 if (params->ops->f_lookup == NULL) {
357 RTE_FLOW_CLASSIFY_LOG(ERR,
358 "%s: f_lookup function pointer is NULL\n", __func__);
359 return -EINVAL;
360 }
361
362 /* De we have room for one more table? */
363 if (cls->num_tables == RTE_FLOW_CLASSIFY_TABLE_MAX) {
364 RTE_FLOW_CLASSIFY_LOG(ERR,
365 "%s: Incorrect value for num_tables parameter\n",
366 __func__);
367 return -EINVAL;
368 }
369
370 return 0;
371 }
372
373 int
rte_flow_classify_table_create(struct rte_flow_classifier * cls,struct rte_flow_classify_table_params * params)374 rte_flow_classify_table_create(struct rte_flow_classifier *cls,
375 struct rte_flow_classify_table_params *params)
376 {
377 struct rte_cls_table *table;
378 void *h_table;
379 uint32_t entry_size;
380 int ret;
381
382 /* Check input arguments */
383 ret = rte_table_check_params(cls, params);
384 if (ret != 0)
385 return ret;
386
387 /* calculate table entry size */
388 entry_size = sizeof(struct rte_flow_classify_table_entry);
389
390 /* Create the table */
391 h_table = params->ops->f_create(params->arg_create, cls->socket_id,
392 entry_size);
393 if (h_table == NULL) {
394 RTE_FLOW_CLASSIFY_LOG(ERR, "%s: Table creation failed\n",
395 __func__);
396 return -EINVAL;
397 }
398
399 /* Commit current table to the classifier */
400 table = &cls->tables[cls->num_tables];
401 table->type = params->type;
402 cls->num_tables++;
403
404 /* Save input parameters */
405 memcpy(&table->ops, params->ops, sizeof(struct rte_table_ops));
406
407 /* Initialize table internal data structure */
408 table->entry_size = entry_size;
409 table->h_table = h_table;
410
411 return 0;
412 }
413
/*
 * Build a new IPv4 5-tuple classify rule from the filter most recently
 * parsed into cls->ntuple_filter. Fills both the ACL add key and the
 * caller-visible 5-tuple copy, then mirrors the add key into the delete
 * key. Returns a malloc'd rule (caller owns it) or NULL on OOM.
 */
static struct rte_flow_classify_rule *
allocate_acl_ipv4_5tuple_rule(struct rte_flow_classifier *cls)
{
	struct rte_flow_classify_rule *rule;

	rule = malloc(sizeof(struct rte_flow_classify_rule));
	if (!rule)
		return rule;

	memset(rule, 0, sizeof(struct rte_flow_classify_rule));
	rule->id = unique_id++;
	rule->rules.type = RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE;

	/* key add values */
	rule->u.key.key_add.priority = cls->ntuple_filter.priority;
	rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].mask_range.u8 =
			cls->ntuple_filter.proto_mask;
	rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].value.u8 =
			cls->ntuple_filter.proto;
	rule->rules.u.ipv4_5tuple.proto = cls->ntuple_filter.proto;
	rule->rules.u.ipv4_5tuple.proto_mask = cls->ntuple_filter.proto_mask;

	rule->u.key.key_add.field_value[SRC_FIELD_IPV4].mask_range.u32 =
			cls->ntuple_filter.src_ip_mask;
	rule->u.key.key_add.field_value[SRC_FIELD_IPV4].value.u32 =
			cls->ntuple_filter.src_ip;
	rule->rules.u.ipv4_5tuple.src_ip_mask = cls->ntuple_filter.src_ip_mask;
	rule->rules.u.ipv4_5tuple.src_ip = cls->ntuple_filter.src_ip;

	rule->u.key.key_add.field_value[DST_FIELD_IPV4].mask_range.u32 =
			cls->ntuple_filter.dst_ip_mask;
	rule->u.key.key_add.field_value[DST_FIELD_IPV4].value.u32 =
			cls->ntuple_filter.dst_ip;
	rule->rules.u.ipv4_5tuple.dst_ip_mask = cls->ntuple_filter.dst_ip_mask;
	rule->rules.u.ipv4_5tuple.dst_ip = cls->ntuple_filter.dst_ip;

	rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].mask_range.u16 =
			cls->ntuple_filter.src_port_mask;
	rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].value.u16 =
			cls->ntuple_filter.src_port;
	rule->rules.u.ipv4_5tuple.src_port_mask =
			cls->ntuple_filter.src_port_mask;
	rule->rules.u.ipv4_5tuple.src_port = cls->ntuple_filter.src_port;

	rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].mask_range.u16 =
			cls->ntuple_filter.dst_port_mask;
	rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].value.u16 =
			cls->ntuple_filter.dst_port;
	rule->rules.u.ipv4_5tuple.dst_port_mask =
			cls->ntuple_filter.dst_port_mask;
	rule->rules.u.ipv4_5tuple.dst_port = cls->ntuple_filter.dst_port;

	if (rte_log_can_log(librte_flow_classify_logtype, RTE_LOG_DEBUG))
		print_acl_ipv4_key_add(&rule->u.key.key_add);

	/* key delete values: copy all five fields straight from the add key */
	memcpy(&rule->u.key.key_del.field_value[PROTO_FIELD_IPV4],
	       &rule->u.key.key_add.field_value[PROTO_FIELD_IPV4],
	       NUM_FIELDS_IPV4 * sizeof(struct rte_acl_field));

	if (rte_log_can_log(librte_flow_classify_logtype, RTE_LOG_DEBUG))
		print_acl_ipv4_key_delete(&rule->u.key.key_del);

	return rule;
}
479
/*
 * Validate/parse a flow rule and insert it into the classifier table whose
 * type matches the file-global 'table_type'. On success returns the new
 * rule handle (caller owns it); NULL on any failure, with *error set where
 * possible.
 */
struct rte_flow_classify_rule *
rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		int *key_found,
		struct rte_flow_error *error)
{
	struct rte_flow_classify_rule *rule;
	struct rte_flow_classify_table_entry *table_entry;
	struct classify_action *action;
	uint32_t i;
	int ret;

	if (!error)
		return NULL;

	if (key_found == NULL) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "NULL key_found.");
		return NULL;
	}

	/* parse attr, pattern and actions */
	ret = rte_flow_classify_validate(cls, attr, pattern, actions, error);
	if (ret < 0)
		return NULL;

	/*
	 * NOTE(review): dispatch is on the file-global 'table_type', not a
	 * parameter — presumably set by the parse code before rules are
	 * added; confirm against the code that assigns it.
	 */
	switch (table_type) {
	case RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE:
		rule = allocate_acl_ipv4_5tuple_rule(cls);
		if (!rule)
			return NULL;
		rule->tbl_type = table_type;
		cls->table_mask |= table_type;
		break;
	default:
		return NULL;
	}

	/* Fill the table entry payload from the parsed action. */
	action = classify_get_flow_action();
	table_entry = &rule->entry;
	table_entry->rule_id = rule->id;
	table_entry->action.action_mask = action->action_mask;

	/* Copy actions */
	if (action->action_mask & (1LLU << RTE_FLOW_ACTION_TYPE_COUNT)) {
		memcpy(&table_entry->action.act.counter, &action->act.counter,
				sizeof(table_entry->action.act.counter));
	}
	if (action->action_mask & (1LLU << RTE_FLOW_ACTION_TYPE_MARK)) {
		memcpy(&table_entry->action.act.mark, &action->act.mark,
				sizeof(table_entry->action.act.mark));
	}

	/* Insert into the first table whose type matches. */
	for (i = 0; i < cls->num_tables; i++) {
		struct rte_cls_table *table = &cls->tables[i];

		if (table->type == table_type) {
			if (table->ops.f_add != NULL) {
				ret = table->ops.f_add(
					table->h_table,
					&rule->u.key.key_add,
					&rule->entry,
					&rule->key_found,
					&rule->entry_ptr);
				if (ret) {
					free(rule);
					return NULL;
				}

				*key_found = rule->key_found;
			}
			/*
			 * NOTE(review): when f_add is NULL the rule is
			 * returned without insertion and *key_found is left
			 * unset — verify callers tolerate this path.
			 */
			return rule;
		}
	}
	/* No table of the required type exists in this classifier. */
	free(rule);
	return NULL;
}
561
562 int
rte_flow_classify_table_entry_delete(struct rte_flow_classifier * cls,struct rte_flow_classify_rule * rule)563 rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
564 struct rte_flow_classify_rule *rule)
565 {
566 uint32_t i;
567 int ret = -EINVAL;
568
569 if (!cls || !rule)
570 return ret;
571 enum rte_flow_classify_table_type tbl_type = rule->tbl_type;
572
573 for (i = 0; i < cls->num_tables; i++) {
574 struct rte_cls_table *table = &cls->tables[i];
575
576 if (table->type == tbl_type) {
577 if (table->ops.f_delete != NULL) {
578 ret = table->ops.f_delete(table->h_table,
579 &rule->u.key.key_del,
580 &rule->key_found,
581 &rule->entry);
582
583 return ret;
584 }
585 }
586 }
587 free(rule);
588 return ret;
589 }
590
591 static int
flow_classifier_lookup(struct rte_flow_classifier * cls,struct rte_cls_table * table,struct rte_mbuf ** pkts,const uint16_t nb_pkts)592 flow_classifier_lookup(struct rte_flow_classifier *cls,
593 struct rte_cls_table *table,
594 struct rte_mbuf **pkts,
595 const uint16_t nb_pkts)
596 {
597 int ret = -EINVAL;
598 uint64_t pkts_mask;
599 uint64_t lookup_hit_mask;
600
601 pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t);
602 ret = table->ops.f_lookup(table->h_table,
603 pkts, pkts_mask, &lookup_hit_mask,
604 (void **)cls->entries);
605
606 if (!ret && lookup_hit_mask)
607 cls->nb_pkts = nb_pkts;
608 else
609 cls->nb_pkts = 0;
610
611 return ret;
612 }
613
614 static int
action_apply(struct rte_flow_classifier * cls,struct rte_flow_classify_rule * rule,struct rte_flow_classify_stats * stats)615 action_apply(struct rte_flow_classifier *cls,
616 struct rte_flow_classify_rule *rule,
617 struct rte_flow_classify_stats *stats)
618 {
619 struct rte_flow_classify_ipv4_5tuple_stats *ntuple_stats;
620 struct rte_flow_classify_table_entry *entry = &rule->entry;
621 uint64_t count = 0;
622 uint32_t action_mask = entry->action.action_mask;
623 int i, ret = -EINVAL;
624
625 if (action_mask & (1LLU << RTE_FLOW_ACTION_TYPE_COUNT)) {
626 for (i = 0; i < cls->nb_pkts; i++) {
627 if (rule->id == cls->entries[i]->rule_id)
628 count++;
629 }
630 if (count) {
631 ret = 0;
632 ntuple_stats = stats->stats;
633 ntuple_stats->counter1 = count;
634 ntuple_stats->ipv4_5tuple = rule->rules.u.ipv4_5tuple;
635 }
636 }
637 return ret;
638 }
639
640 int
rte_flow_classifier_query(struct rte_flow_classifier * cls,struct rte_mbuf ** pkts,const uint16_t nb_pkts,struct rte_flow_classify_rule * rule,struct rte_flow_classify_stats * stats)641 rte_flow_classifier_query(struct rte_flow_classifier *cls,
642 struct rte_mbuf **pkts,
643 const uint16_t nb_pkts,
644 struct rte_flow_classify_rule *rule,
645 struct rte_flow_classify_stats *stats)
646 {
647 enum rte_flow_classify_table_type tbl_type;
648 uint32_t i;
649 int ret = -EINVAL;
650
651 if (!cls || !rule || !stats || !pkts || nb_pkts == 0)
652 return ret;
653
654 tbl_type = rule->tbl_type;
655 for (i = 0; i < cls->num_tables; i++) {
656 struct rte_cls_table *table = &cls->tables[i];
657
658 if (table->type == tbl_type) {
659 ret = flow_classifier_lookup(cls, table,
660 pkts, nb_pkts);
661 if (!ret) {
662 ret = action_apply(cls, rule, stats);
663 return ret;
664 }
665 }
666 }
667 return ret;
668 }
669
/* Register this library's log type; default level INFO. */
RTE_LOG_REGISTER(librte_flow_classify_logtype, lib.flow_classify, INFO);
671