1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2017 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <rte_flow_classify.h>
#include "rte_flow_classify_parse.h"
#include <rte_flow_driver.h>
#include <rte_table_acl.h>
#include <stdbool.h>

/* Dynamic log type id for this library; registered in the RTE_INIT hook
 * at the bottom of this file.
 */
int librte_flow_classify_logtype;

/* Scratch ntuple filter: filled by flow_classify_parse_flow() and then
 * consumed by allocate_acl_ipv4_5tuple_rule().
 * NOTE(review): file-scope and unguarded, so parse + allocate is not
 * thread-safe; confirm callers serialize rule addition.
 */
static struct rte_eth_ntuple_filter ntuple_filter;
/* Monotonically increasing id handed out to each new classify rule. */
static uint32_t unique_id = 1;


struct rte_flow_classify_table_entry {
	/* meta-data for classify rule */
	uint32_t rule_id;
};

struct rte_table {
	/* Input parameters */
	struct rte_table_ops ops;	/* low-level table operation vtable */
	uint32_t entry_size;		/* size of per-rule meta-data entry */
	enum rte_flow_classify_table_type type;

	/* Handle to the low-level table object */
	void *h_table;
};

#define RTE_FLOW_CLASSIFIER_MAX_NAME_SZ 256

struct rte_flow_classifier {
	/* Input parameters */
	char name[RTE_FLOW_CLASSIFIER_MAX_NAME_SZ];
	int socket_id;			/* NUMA socket for allocations */
	enum rte_flow_classify_table_type type;

	/* Internal tables */
	struct rte_table tables[RTE_FLOW_CLASSIFY_TABLE_MAX];
	uint32_t num_tables;		/* number of tables created so far */
	uint16_t nb_pkts;		/* hit count from the last lookup */
	/* per-packet entry pointers filled by the last f_lookup() call */
	struct rte_flow_classify_table_entry
		*entries[RTE_PORT_IN_BURST_SIZE_MAX];
} __rte_cache_aligned;

/* Field indices into the ACL rule key (IPv4 5-tuple layout). */
enum {
	PROTO_FIELD_IPV4,
	SRC_FIELD_IPV4,
	DST_FIELD_IPV4,
	SRCP_FIELD_IPV4,
	DSTP_FIELD_IPV4,
	NUM_FIELDS_IPV4
};

struct acl_keys {
	struct rte_table_acl_rule_add_params key_add; /* add key */
	struct rte_table_acl_rule_delete_params key_del; /* delete key */
};

struct classify_rules {
	enum rte_flow_classify_rule_type type;
	union {
		struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
	} u;
};

struct rte_flow_classify_rule {
	uint32_t id; /* unique ID of classify rule */
	struct rte_flow_action action; /* action when match found */
	struct classify_rules rules; /* union of rules */
	union {
		struct acl_keys key;
	} u;
	int key_found; /* rule key found in table */
	void *entry; /* pointer to buffer to hold rule meta data */
	void *entry_ptr; /* handle to the table entry for rule meta data */
};

/*
 * Parse a generic rte_flow (attr, pattern, actions) into the file-scope
 * ntuple_filter scratch variable.
 *
 * Builds a compacted copy of the pattern with VOID items stripped, picks
 * the matching parse callback via classify_find_parse_filter_func() and
 * runs it.  Returns 0 on success, negative errno on failure (error is
 * populated via rte_flow_error_set()).
 */
static int
flow_classify_parse_flow(
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_flow_item *items;
	parse_filter_t parse_filter;
	uint32_t item_num = 0;
	uint32_t i = 0;
	int ret;

	memset(&ntuple_filter, 0, sizeof(ntuple_filter));

	/* Get the non-void item number of pattern */
	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
			item_num++;
		i++;
	}
	/* +1 for the terminating END item */
	item_num++;

	items = malloc(item_num * sizeof(struct rte_flow_item));
	if (!items) {
		rte_flow_error_set(error, ENOMEM,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "No memory for pattern items.");
		return -ENOMEM;
	}

	memset(items, 0, item_num * sizeof(struct rte_flow_item));
	classify_pattern_skip_void_item(items, pattern);

	parse_filter = classify_find_parse_filter_func(items);
	if (!parse_filter) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				pattern, "Unsupported pattern");
		free(items);
		return -EINVAL;
	}

	/* parse into the file-scope ntuple_filter */
	ret = parse_filter(attr, items, actions, &ntuple_filter, error);
	free(items);
	return ret;
}


/* Split a host-order 32-bit IPv4 address into its four dotted-quad bytes. */
#define uint32_t_to_char(ip, a, b, c, d) do {\
		*a = (unsigned char)(ip >> 24 & 0xff);\
		*b = (unsigned char)(ip >> 16 & 0xff);\
		*c = (unsigned char)(ip >> 8 & 0xff);\
		*d = (unsigned char)(ip & 0xff);\
	} while (0)

/* Debug helper: dump an ACL "add" key (5-tuple values/masks and priority)
 * to stdout.  Called only when the library log level is DEBUG.
 */
static inline void
print_acl_ipv4_key_add(struct rte_table_acl_rule_add_params *key)
{
	unsigned char a, b, c, d;

	printf("%s: 0x%02hhx/0x%hhx ", __func__,
		key->field_value[PROTO_FIELD_IPV4].value.u8,
		key->field_value[PROTO_FIELD_IPV4].mask_range.u8);

	uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
			&a, &b, &c, &d);
	printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
			key->field_value[SRC_FIELD_IPV4].mask_range.u32);

	uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
			&a, &b, &c, &d);
	printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
			key->field_value[DST_FIELD_IPV4].mask_range.u32);

	printf("%hu : 0x%x %hu : 0x%x",
		key->field_value[SRCP_FIELD_IPV4].value.u16,
		key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
		key->field_value[DSTP_FIELD_IPV4].value.u16,
		key->field_value[DSTP_FIELD_IPV4].mask_range.u16);

	printf(" priority: 0x%x\n", key->priority);
}

/* Debug helper: dump an ACL "delete" key (5-tuple values/masks) to stdout.
 * Called only when the library log level is DEBUG.
 */
static inline void
print_acl_ipv4_key_delete(struct rte_table_acl_rule_delete_params *key)
{
	unsigned char a, b, c, d;

	printf("%s: 0x%02hhx/0x%hhx ", __func__,
		key->field_value[PROTO_FIELD_IPV4].value.u8,
		key->field_value[PROTO_FIELD_IPV4].mask_range.u8);

	uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
			&a, &b, &c, &d);
	printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
			key->field_value[SRC_FIELD_IPV4].mask_range.u32);

	uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
			&a, &b, &c, &d);
	printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
			key->field_value[DST_FIELD_IPV4].mask_range.u32);

	printf("%hu : 0x%x %hu : 0x%x\n",
		key->field_value[SRCP_FIELD_IPV4].value.u16,
		key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
		key->field_value[DSTP_FIELD_IPV4].value.u16,
		key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
}

/*
 * Validate rte_flow_classifier_create() parameters: non-NULL params and
 * name, socket_id within [0, RTE_MAX_NUMA_NODES).
 * Returns 0 if valid, -EINVAL otherwise.
 */
static int
rte_flow_classifier_check_params(struct rte_flow_classifier_params *params)
{
	if (params == NULL) {
		RTE_FLOW_CLASSIFY_LOG(ERR,
			"%s: Incorrect value for parameter params\n", __func__);
		return -EINVAL;
	}

	/* name */
	if (params->name == NULL) {
		RTE_FLOW_CLASSIFY_LOG(ERR,
			"%s: Incorrect value for parameter name\n", __func__);
		return -EINVAL;
	}

	/* socket */
	if ((params->socket_id < 0) ||
	    (params->socket_id >= RTE_MAX_NUMA_NODES)) {
		RTE_FLOW_CLASSIFY_LOG(ERR,
			"%s: Incorrect value for parameter socket_id\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}

/*
 * Allocate and initialize a flow classifier on the requested NUMA socket.
 * Returns the new classifier, or NULL on parameter error or allocation
 * failure.  The caller owns the result and releases it with
 * rte_flow_classifier_free().
 */
struct rte_flow_classifier *
rte_flow_classifier_create(struct rte_flow_classifier_params *params)
{
	struct rte_flow_classifier *cls;
	int ret;

	/* Check input parameters */
	ret = rte_flow_classifier_check_params(params);
	if (ret != 0) {
		RTE_FLOW_CLASSIFY_LOG(ERR,
			"%s: flow classifier params check failed (%d)\n",
			__func__, ret);
		return NULL;
	}

	/* Allocate memory for the flow classifier */
	cls = rte_zmalloc_socket("FLOW_CLASSIFIER",
			sizeof(struct rte_flow_classifier),
			RTE_CACHE_LINE_SIZE, params->socket_id);

	if (cls == NULL) {
		RTE_FLOW_CLASSIFY_LOG(ERR,
			"%s: flow classifier memory allocation failed\n",
			__func__);
		return NULL;
	}

	/* Save input parameters */
	snprintf(cls->name, RTE_FLOW_CLASSIFIER_MAX_NAME_SZ, "%s",
			params->name);
	cls->socket_id = params->socket_id;
	cls->type = params->type;

	/* Initialize flow classifier internal data structure */
	cls->num_tables = 0;

	return cls;
}

/* Release a single low-level table via its ops vtable, if it has a
 * free callback.
 */
static void
rte_flow_classify_table_free(struct rte_table *table)
{
	if (table->ops.f_free != NULL)
		table->ops.f_free(table->h_table);
}

/*
 * Free a classifier and every table it owns.
 * Returns 0 on success, -EINVAL if cls is NULL.
 * NOTE(review): rules added via rte_flow_classify_table_entry_add() are
 * not tracked here, so any still-outstanding rule memory is not released
 * by this call.
 */
int
rte_flow_classifier_free(struct rte_flow_classifier *cls)
{
	uint32_t i;

	/* Check input parameters */
	if (cls == NULL) {
		RTE_FLOW_CLASSIFY_LOG(ERR,
			"%s: rte_flow_classifier parameter is NULL\n",
			__func__);
		return -EINVAL;
	}

	/* Free tables */
	for (i = 0; i < cls->num_tables; i++) {
		struct rte_table *table = &cls->tables[i];

		rte_flow_classify_table_free(table);
	}

	/* Free flow classifier memory */
	rte_free(cls);

	return 0;
}

/*
 * Validate rte_flow_classify_table_create() arguments: non-NULL cls,
 * params, table_id and ops; the ops vtable must provide at least
 * f_create and f_lookup; and the classifier must have a free table slot.
 * Returns 0 if valid, -EINVAL otherwise.
 */
static int
rte_table_check_params(struct rte_flow_classifier *cls,
		struct rte_flow_classify_table_params *params,
		uint32_t *table_id)
{
	if (cls == NULL) {
		RTE_FLOW_CLASSIFY_LOG(ERR,
			"%s: flow classifier parameter is NULL\n",
			__func__);
		return -EINVAL;
	}
	if (params == NULL) {
		RTE_FLOW_CLASSIFY_LOG(ERR, "%s: params parameter is NULL\n",
			__func__);
		return -EINVAL;
	}
	if (table_id == NULL) {
		RTE_FLOW_CLASSIFY_LOG(ERR, "%s: table_id parameter is NULL\n",
			__func__);
		return -EINVAL;
	}

	/* ops */
	if (params->ops == NULL) {
		RTE_FLOW_CLASSIFY_LOG(ERR, "%s: params->ops is NULL\n",
			__func__);
		return -EINVAL;
	}

	if (params->ops->f_create == NULL) {
		RTE_FLOW_CLASSIFY_LOG(ERR,
			"%s: f_create function pointer is NULL\n", __func__);
		return -EINVAL;
	}

	if (params->ops->f_lookup == NULL) {
		RTE_FLOW_CLASSIFY_LOG(ERR,
			"%s: f_lookup function pointer is NULL\n", __func__);
		return -EINVAL;
	}

	/* Do we have room for one more table? */
	if (cls->num_tables == RTE_FLOW_CLASSIFY_TABLE_MAX) {
		RTE_FLOW_CLASSIFY_LOG(ERR,
			"%s: Incorrect value for num_tables parameter\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}

/*
 * Create a low-level table inside the classifier.
 * On success, returns 0 and stores the new table's index in *table_id;
 * on failure, returns a negative errno and leaves the classifier
 * unchanged.  The entry size is fixed to
 * sizeof(struct rte_flow_classify_table_entry).
 */
int
rte_flow_classify_table_create(struct rte_flow_classifier *cls,
	struct rte_flow_classify_table_params *params,
	uint32_t *table_id)
{
	struct rte_table *table;
	void *h_table;
	uint32_t entry_size, id;
	int ret;

	/* Check input arguments */
	ret = rte_table_check_params(cls, params, table_id);
	if (ret != 0)
		return ret;

	id = cls->num_tables;
	table = &cls->tables[id];

	/* calculate table entry size */
	entry_size = sizeof(struct rte_flow_classify_table_entry);

	/* Create the table */
	h_table = params->ops->f_create(params->arg_create, cls->socket_id,
		entry_size);
	if (h_table == NULL) {
		RTE_FLOW_CLASSIFY_LOG(ERR, "%s: Table creation failed\n",
			__func__);
		return -EINVAL;
	}

	/* Commit current table to the classifier */
	cls->num_tables++;
	*table_id = id;

	/* Save input parameters */
	memcpy(&table->ops, params->ops, sizeof(struct rte_table_ops));

	/* Initialize table internal data structure */
	table->entry_size = entry_size;
	table->h_table = h_table;

	return 0;
}

/*
 * Build an IPv4 5-tuple classify rule from the file-scope ntuple_filter
 * (previously populated by flow_classify_parse_flow()).
 *
 * Fills in both the ACL "add" key and the matching "delete" key, copies
 * the parsed action, and assigns the next unique rule id.  When the
 * library log level is DEBUG, the keys are printed.
 * Returns a heap-allocated rule (caller owns it), or NULL on OOM.
 */
static struct rte_flow_classify_rule *
allocate_acl_ipv4_5tuple_rule(void)
{
	struct rte_flow_classify_rule *rule;
	int log_level;

	rule = malloc(sizeof(struct rte_flow_classify_rule));
	if (!rule)
		return rule;

	memset(rule, 0, sizeof(struct rte_flow_classify_rule));
	rule->id = unique_id++;
	rule->rules.type = RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE;

	/* action parsed earlier alongside ntuple_filter */
	memcpy(&rule->action, classify_get_flow_action(),
			sizeof(struct rte_flow_action));

	/* key add values */
	rule->u.key.key_add.priority = ntuple_filter.priority;
	rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].mask_range.u8 =
			ntuple_filter.proto_mask;
	rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].value.u8 =
			ntuple_filter.proto;
	rule->rules.u.ipv4_5tuple.proto = ntuple_filter.proto;
	rule->rules.u.ipv4_5tuple.proto_mask = ntuple_filter.proto_mask;

	rule->u.key.key_add.field_value[SRC_FIELD_IPV4].mask_range.u32 =
			ntuple_filter.src_ip_mask;
	rule->u.key.key_add.field_value[SRC_FIELD_IPV4].value.u32 =
			ntuple_filter.src_ip;
	rule->rules.u.ipv4_5tuple.src_ip_mask = ntuple_filter.src_ip_mask;
	rule->rules.u.ipv4_5tuple.src_ip = ntuple_filter.src_ip;

	rule->u.key.key_add.field_value[DST_FIELD_IPV4].mask_range.u32 =
			ntuple_filter.dst_ip_mask;
	rule->u.key.key_add.field_value[DST_FIELD_IPV4].value.u32 =
			ntuple_filter.dst_ip;
	rule->rules.u.ipv4_5tuple.dst_ip_mask = ntuple_filter.dst_ip_mask;
	rule->rules.u.ipv4_5tuple.dst_ip = ntuple_filter.dst_ip;

	rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].mask_range.u16 =
			ntuple_filter.src_port_mask;
	rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].value.u16 =
			ntuple_filter.src_port;
	rule->rules.u.ipv4_5tuple.src_port_mask = ntuple_filter.src_port_mask;
	rule->rules.u.ipv4_5tuple.src_port = ntuple_filter.src_port;

	rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].mask_range.u16 =
			ntuple_filter.dst_port_mask;
	rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].value.u16 =
			ntuple_filter.dst_port;
	rule->rules.u.ipv4_5tuple.dst_port_mask = ntuple_filter.dst_port_mask;
	rule->rules.u.ipv4_5tuple.dst_port = ntuple_filter.dst_port;

	log_level = rte_log_get_level(librte_flow_classify_logtype);

	if (log_level == RTE_LOG_DEBUG)
		print_acl_ipv4_key_add(&rule->u.key.key_add);

	/* key delete values: the delete key's field_value layout mirrors
	 * the add key's, so a single memcpy of all NUM_FIELDS_IPV4 fields
	 * suffices.
	 */
	memcpy(&rule->u.key.key_del.field_value[PROTO_FIELD_IPV4],
	       &rule->u.key.key_add.field_value[PROTO_FIELD_IPV4],
	       NUM_FIELDS_IPV4 * sizeof(struct rte_acl_field));

	if (log_level == RTE_LOG_DEBUG)
		print_acl_ipv4_key_delete(&rule->u.key.key_del);

	return rule;
}

/*
 * Validate and add a flow rule to the given table.
 *
 * Parses (attr, pattern, actions), builds a rule of the classifier's
 * type (only ACL IPv4 5-tuple is handled here), allocates the rule's
 * table-entry meta-data and inserts it via the table's f_add callback.
 * On success returns the new rule (caller owns it; remove it with
 * rte_flow_classify_table_entry_delete()) and sets *key_found to whether
 * an identical key already existed.  Returns NULL on any failure; error
 * must be non-NULL and is populated for reporting.
 *
 * NOTE(review): if ops.f_add is NULL the rule is returned without ever
 * being inserted and *key_found is left unset — confirm all supported
 * table ops provide f_add.
 */
struct rte_flow_classify_rule *
rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
		uint32_t table_id,
		int *key_found,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_flow_classify_rule *rule;
	struct rte_flow_classify_table_entry *table_entry;
	int ret;

	if (!error)
		return NULL;

	if (!cls) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "NULL classifier.");
		return NULL;
	}

	if (table_id >= cls->num_tables) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "invalid table_id.");
		return NULL;
	}

	if (key_found == NULL) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "NULL key_found.");
		return NULL;
	}

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return NULL;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return NULL;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR,
				NULL, "NULL attribute.");
		return NULL;
	}

	/* parse attr, pattern and actions */
	ret = flow_classify_parse_flow(attr, pattern, actions, error);
	if (ret < 0)
		return NULL;

	switch (cls->type) {
	case RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL:
		rule = allocate_acl_ipv4_5tuple_rule();
		if (!rule)
			return NULL;
		break;
	default:
		return NULL;
	}

	/* meta-data buffer stored in the table alongside the key */
	rule->entry = malloc(sizeof(struct rte_flow_classify_table_entry));
	if (!rule->entry) {
		free(rule);
		return NULL;
	}

	table_entry = rule->entry;
	table_entry->rule_id = rule->id;

	if (cls->tables[table_id].ops.f_add != NULL) {
		ret = cls->tables[table_id].ops.f_add(
			cls->tables[table_id].h_table,
			&rule->u.key.key_add,
			rule->entry,
			&rule->key_found,
			&rule->entry_ptr);
		if (ret) {
			free(rule->entry);
			free(rule);
			return NULL;
		}
		*key_found = rule->key_found;
	}
	return rule;
}

/*
 * Delete a rule previously returned by rte_flow_classify_table_entry_add().
 * Returns the table's f_delete result, or -EINVAL on bad arguments or if
 * the table has no f_delete callback.
 *
 * NOTE(review): neither rule->entry nor rule itself is freed here, and no
 * other API in this file frees them — looks like a memory leak; confirm
 * intended ownership before changing.
 * NOTE(review): f_delete's last argument is an output *buffer* for the
 * deleted entry's data, but &rule->entry (the address of the pointer) is
 * passed, so entry_size bytes are written over the pointer value itself.
 * The pointer is not dereferenced afterwards, but this deserves a second
 * look — and it is why blindly free()ing rule->entry after this call
 * would be unsafe.
 */
int
rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
		uint32_t table_id,
		struct rte_flow_classify_rule *rule)
{
	int ret = -EINVAL;

	if (!cls || !rule || table_id >= cls->num_tables)
		return ret;

	if (cls->tables[table_id].ops.f_delete != NULL)
		ret = cls->tables[table_id].ops.f_delete(
			cls->tables[table_id].h_table,
			&rule->u.key.key_del,
			&rule->key_found,
			&rule->entry);

	return ret;
}

/*
 * Run the table's f_lookup over the packet burst and cache the results:
 * cls->entries[] receives the per-packet entry pointers and cls->nb_pkts
 * records nb_pkts on a hit (0 otherwise), for action_apply() to consume.
 * Returns the f_lookup result (0 on success).
 */
static int
flow_classifier_lookup(struct rte_flow_classifier *cls,
		uint32_t table_id,
		struct rte_mbuf **pkts,
		const uint16_t nb_pkts)
{
	int ret = -EINVAL;	/* NOTE(review): dead store, overwritten below */
	uint64_t pkts_mask;
	uint64_t lookup_hit_mask;

	pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t);
	ret = cls->tables[table_id].ops.f_lookup(
		cls->tables[table_id].h_table,
		pkts, pkts_mask, &lookup_hit_mask,
		(void **)cls->entries);

	if (!ret && lookup_hit_mask)
		cls->nb_pkts = nb_pkts;
	else
		cls->nb_pkts = 0;

	return ret;
}

/*
 * Apply the rule's action to the lookup results cached by
 * flow_classifier_lookup().  Only RTE_FLOW_ACTION_TYPE_COUNT is handled:
 * packets whose matched entry carries this rule's id are counted into
 * stats->stats (interpreted as rte_flow_classify_ipv4_5tuple_stats).
 * Returns 0 if at least one packet matched, -EINVAL if none did,
 * -ENOTSUP for any other action type.
 */
static int
action_apply(struct rte_flow_classifier *cls,
		struct rte_flow_classify_rule *rule,
		struct rte_flow_classify_stats *stats)
{
	struct rte_flow_classify_ipv4_5tuple_stats *ntuple_stats;
	uint64_t count = 0;
	int i;
	int ret = -EINVAL;

	switch (rule->action.type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		for (i = 0; i < cls->nb_pkts; i++) {
			if (rule->id == cls->entries[i]->rule_id)
				count++;
		}
		if (count) {
			ret = 0;
			ntuple_stats =
				(struct rte_flow_classify_ipv4_5tuple_stats *)
				stats->stats;
			ntuple_stats->counter1 = count;
			ntuple_stats->ipv4_5tuple = rule->rules.u.ipv4_5tuple;
		}
		break;
	default:
		ret = -ENOTSUP;
		break;
	}

	return ret;
}

/*
 * Look up a burst of packets in the given table and, for those that hit,
 * apply the rule's action, filling stats.  Returns 0 on success, negative
 * errno on bad arguments, lookup failure, or unsupported action.
 */
int
rte_flow_classifier_query(struct rte_flow_classifier *cls,
		uint32_t table_id,
		struct rte_mbuf **pkts,
		const uint16_t nb_pkts,
		struct rte_flow_classify_rule *rule,
		struct rte_flow_classify_stats *stats)
{
	int ret = -EINVAL;

	if (!cls || !rule || !stats || !pkts || nb_pkts == 0 ||
		table_id >= cls->num_tables)
		return ret;

	ret = flow_classifier_lookup(cls, table_id, pkts, nb_pkts);
	if (!ret)
		ret = action_apply(cls, rule, stats);
	return ret;
}

/* Constructor: register the library's dynamic log type and default its
 * level to INFO.
 */
RTE_INIT(librte_flow_classify_init_log);

static void
librte_flow_classify_init_log(void)
{
	librte_flow_classify_logtype =
		rte_log_register("librte.flow_classify");
	if (librte_flow_classify_logtype >= 0)
		rte_log_set_level(librte_flow_classify_logtype, RTE_LOG_INFO);
}