1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4 
5 #include <rte_string_fns.h>
6 #include <rte_flow_classify.h>
7 #include "rte_flow_classify_parse.h"
8 #include <rte_table_acl.h>
9 
/* Monotonically increasing ID handed to each newly allocated rule. */
static uint32_t unique_id = 1;

/* Type of the classify table in use; starts as NONE.
 * NOTE(review): never written in this file's visible code -- confirm
 * where it is set before a table lookup can match.
 */
enum rte_flow_classify_table_type table_type
	= RTE_FLOW_CLASSIFY_TABLE_TYPE_NONE;
14 
/* Payload stored in a classifier table for each installed rule. */
struct rte_flow_classify_table_entry {
	/* meta-data for classify rule */
	uint32_t rule_id;

	/* Flow action */
	struct classify_action action;
};
22 
/* Per-table state kept inside a classifier instance. */
struct rte_cls_table {
	/* Input parameters */
	struct rte_table_ops ops;	/* low-level table operations */
	uint32_t entry_size;		/* size of one table entry, in bytes */
	enum rte_flow_classify_table_type type;

	/* Handle to the low-level table object */
	void *h_table;
};
32 
/* Maximum classifier name length, including the NUL terminator
 * (used as the strlcpy() bound in rte_flow_classifier_create()).
 */
#define RTE_FLOW_CLASSIFIER_MAX_NAME_SZ 256

/* A flow classifier: a named set of tables plus scratch state used
 * while parsing rules and looking up packet bursts.
 */
struct rte_flow_classifier {
	/* Input parameters */
	char name[RTE_FLOW_CLASSIFIER_MAX_NAME_SZ];
	int socket_id;	/* NUMA socket the instance was allocated on */

	/* Internal */
	/* ntuple_filter: scratch area filled by rte_flow_classify_validate() */
	struct rte_eth_ntuple_filter ntuple_filter;

	/* classifier tables */
	struct rte_cls_table tables[RTE_FLOW_CLASSIFY_TABLE_MAX];
	uint32_t table_mask;	/* OR of table types added so far */
	uint32_t num_tables;	/* number of valid entries in tables[] */

	/* Result of the most recent lookup burst */
	uint16_t nb_pkts;	/* burst size when the lookup hit, else 0 */
	struct rte_flow_classify_table_entry
		*entries[RTE_PORT_IN_BURST_SIZE_MAX];
} __rte_cache_aligned;
53 
/* Field indices of the IPv4 5-tuple within an ACL rule key. */
enum {
	PROTO_FIELD_IPV4,
	SRC_FIELD_IPV4,
	DST_FIELD_IPV4,
	SRCP_FIELD_IPV4,
	DSTP_FIELD_IPV4,
	NUM_FIELDS_IPV4		/* field count, not a field index */
};
62 
/* ACL keys kept per rule: one for insertion, one for removal. */
struct acl_keys {
	struct rte_table_acl_rule_add_params key_add; /* add key */
	struct rte_table_acl_rule_delete_params	key_del; /* delete key */
};
67 
/* User-visible form of a rule's match pattern, tagged by rule type. */
struct classify_rules {
	enum rte_flow_classify_rule_type type;
	union {
		struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
	} u;
};
74 
/* A single installed classify rule and its table bookkeeping. */
struct rte_flow_classify_rule {
	uint32_t id; /* unique ID of classify rule */
	enum rte_flow_classify_table_type tbl_type; /* rule table */
	struct classify_rules rules; /* union of rules */
	union {
		struct acl_keys key;
	} u;
	int key_found;   /* rule key found in table */
	struct rte_flow_classify_table_entry entry;  /* rule meta data */
	void *entry_ptr; /* handle to the table entry for rule meta data */
};
86 
87 int
88 rte_flow_classify_validate(
89 		   struct rte_flow_classifier *cls,
90 		   const struct rte_flow_attr *attr,
91 		   const struct rte_flow_item pattern[],
92 		   const struct rte_flow_action actions[],
93 		   struct rte_flow_error *error)
94 {
95 	struct rte_flow_item *items;
96 	parse_filter_t parse_filter;
97 	uint32_t item_num = 0;
98 	uint32_t i = 0;
99 	int ret;
100 
101 	if (error == NULL)
102 		return -EINVAL;
103 
104 	if (cls == NULL) {
105 		RTE_FLOW_CLASSIFY_LOG(ERR,
106 			"%s: rte_flow_classifier parameter is NULL\n",
107 			__func__);
108 		return -EINVAL;
109 	}
110 
111 	if (!attr) {
112 		rte_flow_error_set(error, EINVAL,
113 				   RTE_FLOW_ERROR_TYPE_ATTR,
114 				   NULL, "NULL attribute.");
115 		return -EINVAL;
116 	}
117 
118 	if (!pattern) {
119 		rte_flow_error_set(error,
120 			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
121 			NULL, "NULL pattern.");
122 		return -EINVAL;
123 	}
124 
125 	if (!actions) {
126 		rte_flow_error_set(error, EINVAL,
127 				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
128 				   NULL, "NULL action.");
129 		return -EINVAL;
130 	}
131 
132 	memset(&cls->ntuple_filter, 0, sizeof(cls->ntuple_filter));
133 
134 	/* Get the non-void item number of pattern */
135 	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
136 		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
137 			item_num++;
138 		i++;
139 	}
140 	item_num++;
141 
142 	items = malloc(item_num * sizeof(struct rte_flow_item));
143 	if (!items) {
144 		rte_flow_error_set(error, ENOMEM,
145 				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
146 				NULL, "No memory for pattern items.");
147 		return -ENOMEM;
148 	}
149 
150 	memset(items, 0, item_num * sizeof(struct rte_flow_item));
151 	classify_pattern_skip_void_item(items, pattern);
152 
153 	parse_filter = classify_find_parse_filter_func(items);
154 	if (!parse_filter) {
155 		rte_flow_error_set(error, EINVAL,
156 				RTE_FLOW_ERROR_TYPE_ITEM,
157 				pattern, "Unsupported pattern");
158 		free(items);
159 		return -EINVAL;
160 	}
161 
162 	ret = parse_filter(attr, items, actions, &cls->ntuple_filter, error);
163 	free(items);
164 	return ret;
165 }
166 
167 
/*
 * Split a host-byte-order IPv4 address into its four octets, most
 * significant first.  a..d are pointers to unsigned char.  All macro
 * arguments are parenthesized so expression arguments (e.g. "x + y")
 * expand with the intended precedence.
 */
#define uint32_t_to_char(ip, a, b, c, d) do {\
		*(a) = (unsigned char)((ip) >> 24 & 0xff);\
		*(b) = (unsigned char)((ip) >> 16 & 0xff);\
		*(c) = (unsigned char)((ip) >> 8 & 0xff);\
		*(d) = (unsigned char)((ip) & 0xff);\
	} while (0)
174 
/* Debug helper: dump an ACL add key (proto, src/dst IP, src/dst port,
 * each with its mask, plus priority) to stdout on one line.
 */
static inline void
print_acl_ipv4_key_add(struct rte_table_acl_rule_add_params *key)
{
	unsigned char a, b, c, d;

	/* protocol value/mask */
	printf("%s:    0x%02hhx/0x%hhx ", __func__,
		key->field_value[PROTO_FIELD_IPV4].value.u8,
		key->field_value[PROTO_FIELD_IPV4].mask_range.u8);

	/* source address in dotted-quad form plus mask */
	uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
			&a, &b, &c, &d);
	printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
			key->field_value[SRC_FIELD_IPV4].mask_range.u32);

	/* destination address in dotted-quad form plus mask */
	uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
			&a, &b, &c, &d);
	printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
			key->field_value[DST_FIELD_IPV4].mask_range.u32);

	/* source and destination ports with masks */
	printf("%hu : 0x%x %hu : 0x%x",
		key->field_value[SRCP_FIELD_IPV4].value.u16,
		key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
		key->field_value[DSTP_FIELD_IPV4].value.u16,
		key->field_value[DSTP_FIELD_IPV4].mask_range.u16);

	printf(" priority: 0x%x\n", key->priority);
}
202 
/* Debug helper: dump an ACL delete key (proto, src/dst IP, src/dst
 * port, each with its mask) to stdout on one line.
 */
static inline void
print_acl_ipv4_key_delete(struct rte_table_acl_rule_delete_params *key)
{
	unsigned char a, b, c, d;

	/* protocol value/mask */
	printf("%s: 0x%02hhx/0x%hhx ", __func__,
		key->field_value[PROTO_FIELD_IPV4].value.u8,
		key->field_value[PROTO_FIELD_IPV4].mask_range.u8);

	/* source address in dotted-quad form plus mask */
	uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
			&a, &b, &c, &d);
	printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
			key->field_value[SRC_FIELD_IPV4].mask_range.u32);

	/* destination address in dotted-quad form plus mask */
	uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
			&a, &b, &c, &d);
	printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
			key->field_value[DST_FIELD_IPV4].mask_range.u32);

	/* source and destination ports with masks */
	printf("%hu : 0x%x %hu : 0x%x\n",
		key->field_value[SRCP_FIELD_IPV4].value.u16,
		key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
		key->field_value[DSTP_FIELD_IPV4].value.u16,
		key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
}
228 
229 static int
230 rte_flow_classifier_check_params(struct rte_flow_classifier_params *params)
231 {
232 	if (params == NULL) {
233 		RTE_FLOW_CLASSIFY_LOG(ERR,
234 			"%s: Incorrect value for parameter params\n", __func__);
235 		return -EINVAL;
236 	}
237 
238 	/* name */
239 	if (params->name == NULL) {
240 		RTE_FLOW_CLASSIFY_LOG(ERR,
241 			"%s: Incorrect value for parameter name\n", __func__);
242 		return -EINVAL;
243 	}
244 
245 	/* socket */
246 	if (params->socket_id < 0) {
247 		RTE_FLOW_CLASSIFY_LOG(ERR,
248 			"%s: Incorrect value for parameter socket_id\n",
249 			__func__);
250 		return -EINVAL;
251 	}
252 
253 	return 0;
254 }
255 
256 struct rte_flow_classifier *
257 rte_flow_classifier_create(struct rte_flow_classifier_params *params)
258 {
259 	struct rte_flow_classifier *cls;
260 	int ret;
261 
262 	/* Check input parameters */
263 	ret = rte_flow_classifier_check_params(params);
264 	if (ret != 0) {
265 		RTE_FLOW_CLASSIFY_LOG(ERR,
266 			"%s: flow classifier params check failed (%d)\n",
267 			__func__, ret);
268 		return NULL;
269 	}
270 
271 	/* Allocate memory for the flow classifier */
272 	cls = rte_zmalloc_socket("FLOW_CLASSIFIER",
273 			sizeof(struct rte_flow_classifier),
274 			RTE_CACHE_LINE_SIZE, params->socket_id);
275 
276 	if (cls == NULL) {
277 		RTE_FLOW_CLASSIFY_LOG(ERR,
278 			"%s: flow classifier memory allocation failed\n",
279 			__func__);
280 		return NULL;
281 	}
282 
283 	/* Save input parameters */
284 	strlcpy(cls->name, params->name, RTE_FLOW_CLASSIFIER_MAX_NAME_SZ);
285 
286 	cls->socket_id = params->socket_id;
287 
288 	return cls;
289 }
290 
291 static void
292 rte_flow_classify_table_free(struct rte_cls_table *table)
293 {
294 	if (table->ops.f_free != NULL)
295 		table->ops.f_free(table->h_table);
296 }
297 
298 int
299 rte_flow_classifier_free(struct rte_flow_classifier *cls)
300 {
301 	uint32_t i;
302 
303 	/* Check input parameters */
304 	if (cls == NULL) {
305 		RTE_FLOW_CLASSIFY_LOG(ERR,
306 			"%s: rte_flow_classifier parameter is NULL\n",
307 			__func__);
308 		return -EINVAL;
309 	}
310 
311 	/* Free tables */
312 	for (i = 0; i < cls->num_tables; i++) {
313 		struct rte_cls_table *table = &cls->tables[i];
314 
315 		rte_flow_classify_table_free(table);
316 	}
317 
318 	/* Free flow classifier memory */
319 	rte_free(cls);
320 
321 	return 0;
322 }
323 
/*
 * Validate table-creation inputs: the classifier, the table params,
 * the ops vector and its mandatory f_create/f_lookup hooks, and that
 * a table slot is still free.  Returns 0 on success, -EINVAL on any
 * failure (logged).
 */
static int
rte_table_check_params(struct rte_flow_classifier *cls,
		struct rte_flow_classify_table_params *params)
{
	if (cls == NULL) {
		RTE_FLOW_CLASSIFY_LOG(ERR,
			"%s: flow classifier parameter is NULL\n",
			__func__);
		return -EINVAL;
	}
	if (params == NULL) {
		RTE_FLOW_CLASSIFY_LOG(ERR, "%s: params parameter is NULL\n",
			__func__);
		return -EINVAL;
	}

	/* ops */
	if (params->ops == NULL) {
		RTE_FLOW_CLASSIFY_LOG(ERR, "%s: params->ops is NULL\n",
			__func__);
		return -EINVAL;
	}

	if (params->ops->f_create == NULL) {
		RTE_FLOW_CLASSIFY_LOG(ERR,
			"%s: f_create function pointer is NULL\n", __func__);
		return -EINVAL;
	}

	if (params->ops->f_lookup == NULL) {
		RTE_FLOW_CLASSIFY_LOG(ERR,
			"%s: f_lookup function pointer is NULL\n", __func__);
		return -EINVAL;
	}

	/* Do we have room for one more table? */
	if (cls->num_tables == RTE_FLOW_CLASSIFY_TABLE_MAX) {
		RTE_FLOW_CLASSIFY_LOG(ERR,
			"%s: Incorrect value for num_tables parameter\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}
369 
370 int
371 rte_flow_classify_table_create(struct rte_flow_classifier *cls,
372 	struct rte_flow_classify_table_params *params)
373 {
374 	struct rte_cls_table *table;
375 	void *h_table;
376 	uint32_t entry_size;
377 	int ret;
378 
379 	/* Check input arguments */
380 	ret = rte_table_check_params(cls, params);
381 	if (ret != 0)
382 		return ret;
383 
384 	/* calculate table entry size */
385 	entry_size = sizeof(struct rte_flow_classify_table_entry);
386 
387 	/* Create the table */
388 	h_table = params->ops->f_create(params->arg_create, cls->socket_id,
389 		entry_size);
390 	if (h_table == NULL) {
391 		RTE_FLOW_CLASSIFY_LOG(ERR, "%s: Table creation failed\n",
392 			__func__);
393 		return -EINVAL;
394 	}
395 
396 	/* Commit current table to the classifier */
397 	table = &cls->tables[cls->num_tables];
398 	table->type = params->type;
399 	cls->num_tables++;
400 
401 	/* Save input parameters */
402 	memcpy(&table->ops, params->ops, sizeof(struct rte_table_ops));
403 
404 	/* Initialize table internal data structure */
405 	table->entry_size = entry_size;
406 	table->h_table = h_table;
407 
408 	return 0;
409 }
410 
411 static struct rte_flow_classify_rule *
412 allocate_acl_ipv4_5tuple_rule(struct rte_flow_classifier *cls)
413 {
414 	struct rte_flow_classify_rule *rule;
415 
416 	rule = malloc(sizeof(struct rte_flow_classify_rule));
417 	if (!rule)
418 		return rule;
419 
420 	memset(rule, 0, sizeof(struct rte_flow_classify_rule));
421 	rule->id = unique_id++;
422 	rule->rules.type = RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE;
423 
424 	/* key add values */
425 	rule->u.key.key_add.priority = cls->ntuple_filter.priority;
426 	rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].mask_range.u8 =
427 			cls->ntuple_filter.proto_mask;
428 	rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].value.u8 =
429 			cls->ntuple_filter.proto;
430 	rule->rules.u.ipv4_5tuple.proto = cls->ntuple_filter.proto;
431 	rule->rules.u.ipv4_5tuple.proto_mask = cls->ntuple_filter.proto_mask;
432 
433 	rule->u.key.key_add.field_value[SRC_FIELD_IPV4].mask_range.u32 =
434 			cls->ntuple_filter.src_ip_mask;
435 	rule->u.key.key_add.field_value[SRC_FIELD_IPV4].value.u32 =
436 			cls->ntuple_filter.src_ip;
437 	rule->rules.u.ipv4_5tuple.src_ip_mask = cls->ntuple_filter.src_ip_mask;
438 	rule->rules.u.ipv4_5tuple.src_ip = cls->ntuple_filter.src_ip;
439 
440 	rule->u.key.key_add.field_value[DST_FIELD_IPV4].mask_range.u32 =
441 			cls->ntuple_filter.dst_ip_mask;
442 	rule->u.key.key_add.field_value[DST_FIELD_IPV4].value.u32 =
443 			cls->ntuple_filter.dst_ip;
444 	rule->rules.u.ipv4_5tuple.dst_ip_mask = cls->ntuple_filter.dst_ip_mask;
445 	rule->rules.u.ipv4_5tuple.dst_ip = cls->ntuple_filter.dst_ip;
446 
447 	rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].mask_range.u16 =
448 			cls->ntuple_filter.src_port_mask;
449 	rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].value.u16 =
450 			cls->ntuple_filter.src_port;
451 	rule->rules.u.ipv4_5tuple.src_port_mask =
452 			cls->ntuple_filter.src_port_mask;
453 	rule->rules.u.ipv4_5tuple.src_port = cls->ntuple_filter.src_port;
454 
455 	rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].mask_range.u16 =
456 			cls->ntuple_filter.dst_port_mask;
457 	rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].value.u16 =
458 			cls->ntuple_filter.dst_port;
459 	rule->rules.u.ipv4_5tuple.dst_port_mask =
460 			cls->ntuple_filter.dst_port_mask;
461 	rule->rules.u.ipv4_5tuple.dst_port = cls->ntuple_filter.dst_port;
462 
463 	if (rte_log_can_log(librte_flow_classify_logtype, RTE_LOG_DEBUG))
464 		print_acl_ipv4_key_add(&rule->u.key.key_add);
465 
466 	/* key delete values */
467 	memcpy(&rule->u.key.key_del.field_value[PROTO_FIELD_IPV4],
468 	       &rule->u.key.key_add.field_value[PROTO_FIELD_IPV4],
469 	       NUM_FIELDS_IPV4 * sizeof(struct rte_acl_field));
470 
471 	if (rte_log_can_log(librte_flow_classify_logtype, RTE_LOG_DEBUG))
472 		print_acl_ipv4_key_delete(&rule->u.key.key_del);
473 
474 	return rule;
475 }
476 
/*
 * Validate attr/pattern/actions, build a rule from the parsed filter
 * and install its key into the first classifier table whose type
 * matches the global table_type.
 *
 * On success returns the newly allocated rule and stores in *key_found
 * whether an identical key already existed; returns NULL on
 * validation, allocation, unsupported-table-type or table-add failure.
 * The returned rule is later released by
 * rte_flow_classify_table_entry_delete().
 */
struct rte_flow_classify_rule *
rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		int *key_found,
		struct rte_flow_error *error)
{
	struct rte_flow_classify_rule *rule;
	struct rte_flow_classify_table_entry *table_entry;
	struct classify_action *action;
	uint32_t i;
	int ret;

	if (!error)
		return NULL;

	if (key_found == NULL) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "NULL key_found.");
		return NULL;
	}

	/* parse attr, pattern and actions */
	ret = rte_flow_classify_validate(cls, attr, pattern, actions, error);
	if (ret < 0)
		return NULL;

	/* Only the IPv4 5-tuple ACL table type is supported. */
	switch (table_type) {
	case RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE:
		rule = allocate_acl_ipv4_5tuple_rule(cls);
		if (!rule)
			return NULL;
		rule->tbl_type = table_type;
		cls->table_mask |= table_type;
		break;
	default:
		return NULL;
	}

	/* NOTE(review): classify_get_flow_action() result is dereferenced
	 * without a NULL check -- confirm it cannot fail here.
	 */
	action = classify_get_flow_action();
	table_entry = &rule->entry;
	table_entry->rule_id = rule->id;
	table_entry->action.action_mask = action->action_mask;

	/* Copy actions */
	if (action->action_mask & (1LLU << RTE_FLOW_ACTION_TYPE_COUNT)) {
		memcpy(&table_entry->action.act.counter, &action->act.counter,
				sizeof(table_entry->action.act.counter));
	}
	if (action->action_mask & (1LLU << RTE_FLOW_ACTION_TYPE_MARK)) {
		memcpy(&table_entry->action.act.mark, &action->act.mark,
				sizeof(table_entry->action.act.mark));
	}

	/* Install the key into the first table of the matching type. */
	for (i = 0; i < cls->num_tables; i++) {
		struct rte_cls_table *table = &cls->tables[i];

		if (table->type == table_type) {
			if (table->ops.f_add != NULL) {
				ret = table->ops.f_add(
					table->h_table,
					&rule->u.key.key_add,
					&rule->entry,
					&rule->key_found,
					&rule->entry_ptr);
				if (ret) {
					free(rule);
					return NULL;
				}

			*key_found = rule->key_found;
			}

			/* NOTE(review): when f_add is NULL the rule is
			 * returned without *key_found being written --
			 * verify callers tolerate this.
			 */
			return rule;
		}
	}
	/* No table of the matching type exists. */
	free(rule);
	return NULL;
}
558 
559 int
560 rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
561 		struct rte_flow_classify_rule *rule)
562 {
563 	uint32_t i;
564 	int ret = -EINVAL;
565 
566 	if (!cls || !rule)
567 		return ret;
568 	enum rte_flow_classify_table_type tbl_type = rule->tbl_type;
569 
570 	for (i = 0; i < cls->num_tables; i++) {
571 		struct rte_cls_table *table = &cls->tables[i];
572 
573 		if (table->type == tbl_type) {
574 			if (table->ops.f_delete != NULL) {
575 				ret = table->ops.f_delete(table->h_table,
576 						&rule->u.key.key_del,
577 						&rule->key_found,
578 						&rule->entry);
579 				if (ret == 0)
580 					free(rule);
581 				return ret;
582 			}
583 		}
584 	}
585 	return ret;
586 }
587 
588 static int
589 flow_classifier_lookup(struct rte_flow_classifier *cls,
590 		struct rte_cls_table *table,
591 		struct rte_mbuf **pkts,
592 		const uint16_t nb_pkts)
593 {
594 	int ret = -EINVAL;
595 	uint64_t pkts_mask;
596 	uint64_t lookup_hit_mask;
597 
598 	pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t);
599 	ret = table->ops.f_lookup(table->h_table,
600 		pkts, pkts_mask, &lookup_hit_mask,
601 		(void **)cls->entries);
602 
603 	if (!ret && lookup_hit_mask)
604 		cls->nb_pkts = nb_pkts;
605 	else
606 		cls->nb_pkts = 0;
607 
608 	return ret;
609 }
610 
611 static int
612 action_apply(struct rte_flow_classifier *cls,
613 		struct rte_flow_classify_rule *rule,
614 		struct rte_flow_classify_stats *stats)
615 {
616 	struct rte_flow_classify_ipv4_5tuple_stats *ntuple_stats;
617 	struct rte_flow_classify_table_entry *entry = &rule->entry;
618 	uint64_t count = 0;
619 	uint32_t action_mask = entry->action.action_mask;
620 	int i, ret = -EINVAL;
621 
622 	if (action_mask & (1LLU << RTE_FLOW_ACTION_TYPE_COUNT)) {
623 		for (i = 0; i < cls->nb_pkts; i++) {
624 			if (rule->id == cls->entries[i]->rule_id)
625 				count++;
626 		}
627 		if (count) {
628 			ret = 0;
629 			ntuple_stats = stats->stats;
630 			ntuple_stats->counter1 = count;
631 			ntuple_stats->ipv4_5tuple = rule->rules.u.ipv4_5tuple;
632 		}
633 	}
634 	return ret;
635 }
636 
637 int
638 rte_flow_classifier_query(struct rte_flow_classifier *cls,
639 		struct rte_mbuf **pkts,
640 		const uint16_t nb_pkts,
641 		struct rte_flow_classify_rule *rule,
642 		struct rte_flow_classify_stats *stats)
643 {
644 	enum rte_flow_classify_table_type tbl_type;
645 	uint32_t i;
646 	int ret = -EINVAL;
647 
648 	if (!cls || !rule || !stats || !pkts  || nb_pkts == 0)
649 		return ret;
650 
651 	tbl_type = rule->tbl_type;
652 	for (i = 0; i < cls->num_tables; i++) {
653 		struct rte_cls_table *table = &cls->tables[i];
654 
655 			if (table->type == tbl_type) {
656 				ret = flow_classifier_lookup(cls, table,
657 						pkts, nb_pkts);
658 				if (!ret) {
659 					ret = action_apply(cls, rule, stats);
660 					return ret;
661 				}
662 			}
663 	}
664 	return ret;
665 }
666 
/* Register the flow-classify log type with a default level of INFO. */
RTE_LOG_REGISTER_DEFAULT(librte_flow_classify_logtype, INFO);
668