/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <ctype.h>
#include <limits.h>
#include <getopt.h>
#include <netinet/in.h>

#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_classify.h>
#include <rte_table_acl.h>

#define RX_RING_SIZE 128
#define TX_RING_SIZE 512

#define NUM_MBUFS 8191
#define MBUF_CACHE_SIZE 250
#define BURST_SIZE 32

#define MAX_NUM_CLASSIFY 30
#define FLOW_CLASSIFY_MAX_RULE_NUM 91
#define FLOW_CLASSIFY_MAX_PRIORITY 8
#define FLOW_CLASSIFIER_NAME_SIZE 64

#define COMMENT_LEAD_CHAR	('#')
#define OPTION_RULE_IPV4	"rule_ipv4"
#define RTE_LOGTYPE_FLOW_CLASSIFY	RTE_LOGTYPE_USER3
#define flow_classify_log(format, ...) \
		RTE_LOG(ERR, FLOW_CLASSIFY, format, ##__VA_ARGS__)
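
/*
 * Helper macro: splits a host-order IPv4 address into its four dotted-quad
 * bytes, e.g. 0x0a000001 yields a=10, b=0, c=0, d=1 (10.0.0.1).
 */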
#define uint32_t_to_char(ip, a, b, c, d) do {\
		*a = (unsigned char)((ip) >> 24 & 0xff);\
		*b = (unsigned char)((ip) >> 16 & 0xff);\
		*c = (unsigned char)((ip) >> 8 & 0xff);\
		*d = (unsigned char)((ip) & 0xff);\
	} while (0)

enum {
	CB_FLD_SRC_ADDR,
	CB_FLD_DST_ADDR,
	CB_FLD_SRC_PORT,
	CB_FLD_SRC_PORT_DLM,
	CB_FLD_SRC_PORT_MASK,
	CB_FLD_DST_PORT,
	CB_FLD_DST_PORT_DLM,
	CB_FLD_DST_PORT_MASK,
	CB_FLD_PROTO,
	CB_FLD_PRIORITY,
	CB_FLD_NUM,
};

static struct {
	const char *rule_ipv4_name;
} parm_config;

const char cb_port_delim[] = ":";

static const struct rte_eth_conf port_conf_default = {
	.rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN }
};

struct flow_classifier {
	struct rte_flow_classifier *cls;
	uint32_t table_id[RTE_FLOW_CLASSIFY_TABLE_MAX];
};

struct flow_classifier_acl {
	struct flow_classifier cls;
} __rte_cache_aligned;

/* ACL field definitions for IPv4 5 tuple rule */

enum {
	PROTO_FIELD_IPV4,
	SRC_FIELD_IPV4,
	DST_FIELD_IPV4,
	SRCP_FIELD_IPV4,
	DSTP_FIELD_IPV4,
	NUM_FIELDS_IPV4
};

enum {
	PROTO_INPUT_IPV4,
	SRC_INPUT_IPV4,
	DST_INPUT_IPV4,
	SRCP_DESTP_INPUT_IPV4
};
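
/*
 * Each ACL field below selects a slice of the packet, at a byte offset
 * measured from the start of the Ethernet header, for the classifier to
 * match on. Fields that share an input_index (the 16-bit source and
 * destination ports) are packed into the same 32-bit ACL lookup word.
 */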
static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
	/* first input field - always one byte long. */
	{
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint8_t),
		.field_index = PROTO_FIELD_IPV4,
		.input_index = PROTO_INPUT_IPV4,
		.offset = sizeof(struct ether_hdr) +
			offsetof(struct ipv4_hdr, next_proto_id),
	},
	/* next input field (IPv4 source address) - 4 consecutive bytes. */
	{
		/* rte_flow uses a bit mask for IPv4 addresses */
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint32_t),
		.field_index = SRC_FIELD_IPV4,
		.input_index = SRC_INPUT_IPV4,
		.offset = sizeof(struct ether_hdr) +
			offsetof(struct ipv4_hdr, src_addr),
	},
	/* next input field (IPv4 destination address) - 4 consecutive bytes. */
	{
		/* rte_flow uses a bit mask for IPv4 addresses */
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint32_t),
		.field_index = DST_FIELD_IPV4,
		.input_index = DST_INPUT_IPV4,
		.offset = sizeof(struct ether_hdr) +
			offsetof(struct ipv4_hdr, dst_addr),
	},
	/*
	 * Next 2 fields (src & dst ports) form 4 consecutive bytes.
	 * They share the same input index.
	 */
	{
		/* rte_flow uses a bit mask for protocol ports */
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint16_t),
		.field_index = SRCP_FIELD_IPV4,
		.input_index = SRCP_DESTP_INPUT_IPV4,
		.offset = sizeof(struct ether_hdr) +
			sizeof(struct ipv4_hdr) +
			offsetof(struct tcp_hdr, src_port),
	},
	{
		/* rte_flow uses a bit mask for protocol ports */
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint16_t),
		.field_index = DSTP_FIELD_IPV4,
		.input_index = SRCP_DESTP_INPUT_IPV4,
		.offset = sizeof(struct ether_hdr) +
			sizeof(struct ipv4_hdr) +
			offsetof(struct tcp_hdr, dst_port),
	},
};

/* flow classify data */
static int num_classify_rules;
static struct rte_flow_classify_rule *rules[MAX_NUM_CLASSIFY];
static struct rte_flow_classify_ipv4_5tuple_stats ntuple_stats;
static struct rte_flow_classify_stats classify_stats = {
	.stats = (void **)&ntuple_stats
};

/* parameters for rte_flow_classify_validate and
 * rte_flow_classify_table_entry_add functions
 */

static struct rte_flow_item eth_item = { RTE_FLOW_ITEM_TYPE_ETH,
	0, 0, 0 };
static struct rte_flow_item end_item = { RTE_FLOW_ITEM_TYPE_END,
	0, 0, 0 };

/* sample actions:
 * "actions count / end"
 */
static struct rte_flow_action count_action = { RTE_FLOW_ACTION_TYPE_COUNT, 0};
static struct rte_flow_action end_action = { RTE_FLOW_ACTION_TYPE_END, 0};
static struct rte_flow_action actions[2];

/* sample attributes */
static struct rte_flow_attr attr;
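
/*
 * add_classify_rule() below assembles every rule from these pieces as
 * pattern = { eth_item, <ipv4 item>, <udp|tcp|sctp item>, end_item } and
 * actions = { count_action, end_action }, which is the IPv4 5-tuple form
 * the flow classify library accepts.
 */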

/* flow_classify.c: Based on DPDK skeleton forwarding example. */

/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_conf port_conf = port_conf_default;
	struct ether_addr addr;
	const uint16_t rx_rings = 1, tx_rings = 1;
	int retval;
	uint16_t q;

	if (port >= rte_eth_dev_count())
		return -1;

	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Allocate and set up 1 RX queue per Ethernet port. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
				rte_eth_dev_socket_id(port), NULL, mbuf_pool);
		if (retval < 0)
			return retval;
	}

	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
				rte_eth_dev_socket_id(port), NULL);
		if (retval < 0)
			return retval;
	}

	/* Start the Ethernet port. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* Display the port MAC address. */
	rte_eth_macaddr_get(port, &addr);
	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
			" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
			port,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	/* Enable RX in promiscuous mode for the Ethernet device. */
	rte_eth_promiscuous_enable(port);

	return 0;
}

/*
 * The lcore main. This is the main thread that does the work: reading from
 * an input port, classifying the packets and writing to an output port.
 */
static __attribute__((noreturn)) void
lcore_main(struct flow_classifier *cls_app)
{
	const uint8_t nb_ports = rte_eth_dev_count();
	uint8_t port;
	int ret;
	int i = 0;

	/* Demonstrate rule deletion: remove the eighth rule added, if any. */
	ret = rte_flow_classify_table_entry_delete(cls_app->cls,
			cls_app->table_id[0], rules[7]);
	if (ret)
		printf("table_entry_delete failed [7] %d\n\n", ret);
	else
		printf("table_entry_delete succeeded [7]\n\n");

	/*
	 * Check that the port is on the same NUMA node as the polling thread
	 * for best performance.
	 */
	for (port = 0; port < nb_ports; port++)
		if (rte_eth_dev_socket_id(port) >= 0 &&
			rte_eth_dev_socket_id(port) != (int)rte_socket_id()) {
			printf("\nWARNING: port %u is on a remote NUMA node "
					"to the polling thread.\n", port);
			printf("Performance will not be optimal.\n");
		}

	printf("\nCore %u forwarding packets. ", rte_lcore_id());
	printf("[Ctrl+C to quit]\n");

	/* Run until the application is quit or killed. */
	for (;;) {
		/*
		 * Receive packets on a port, classify them and forward them
		 * on the paired port.
		 * The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
		 */
		for (port = 0; port < nb_ports; port++) {
			/* Get burst of RX packets, from first port of pair. */
			struct rte_mbuf *bufs[BURST_SIZE];
			const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
					bufs, BURST_SIZE);

			if (unlikely(nb_rx == 0))
				continue;

			for (i = 0; i < MAX_NUM_CLASSIFY; i++) {
				if (rules[i]) {
					ret = rte_flow_classifier_query(
						cls_app->cls,
						cls_app->table_id[0],
						bufs, nb_rx, rules[i],
						&classify_stats);
					if (ret)
						printf(
							"rule [%d] query failed ret [%d]\n\n",
							i, ret);
					else {
						printf(
							"rule[%d] count=%"PRIu64"\n",
							i, ntuple_stats.counter1);

						printf("proto = %d\n",
							ntuple_stats.ipv4_5tuple.proto);
					}
				}
			}

			/* Send burst of TX packets, to second port of pair. */
			const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
					bufs, nb_rx);

			/* Free any unsent packets. */
			if (unlikely(nb_tx < nb_rx)) {
				uint16_t buf;

				for (buf = nb_tx; buf < nb_rx; buf++)
					rte_pktmbuf_free(bufs[buf]);
			}
		}
	}
}

/*
 * Parse IPv4 5 tuple rules file, ipv4_rules_file.txt.
 * Expected format:
 * <src_ipv4_addr>'/'<masklen> <space> \
 * <dst_ipv4_addr>'/'<masklen> <space> \
 * <src_port> <space> ":" <src_port_mask> <space> \
 * <dst_port> <space> ":" <dst_port_mask> <space> \
 * <proto>'/'<proto_mask> <space> \
 * <priority>
 */
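
/*
 * For illustration (the rules shipped with DPDK may differ), a line
 * matching UDP (proto 17) between two /24 subnets on exact ports 32 and
 * 33, at priority 0, would read:
 *
 *   2.2.2.3/24 2.2.2.7/24 32 : 0xffff 33 : 0xffff 17/0xff 0
 */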

static int
get_cb_field(char **in, uint32_t *fd, int base, unsigned long lim,
		char dlm)
{
	unsigned long val;
	char *end;

	errno = 0;
	val = strtoul(*in, &end, base);
	if (errno != 0 || end[0] != dlm || val > lim)
		return -EINVAL;
	*fd = (uint32_t)val;
	*in = end + 1;
	return 0;
}

static int
parse_ipv4_net(char *in, uint32_t *addr, uint32_t *mask_len)
{
	uint32_t a, b, c, d, m;

	if (get_cb_field(&in, &a, 0, UINT8_MAX, '.'))
		return -EINVAL;
	if (get_cb_field(&in, &b, 0, UINT8_MAX, '.'))
		return -EINVAL;
	if (get_cb_field(&in, &c, 0, UINT8_MAX, '.'))
		return -EINVAL;
	if (get_cb_field(&in, &d, 0, UINT8_MAX, '/'))
		return -EINVAL;
	if (get_cb_field(&in, &m, 0, sizeof(uint32_t) * CHAR_BIT, 0))
		return -EINVAL;

	addr[0] = IPv4(a, b, c, d);
	mask_len[0] = m;
	return 0;
}

static int
parse_ipv4_5tuple_rule(char *str, struct rte_eth_ntuple_filter *ntuple_filter)
{
	int i, ret;
	char *s, *sp, *in[CB_FLD_NUM];
	static const char *dlm = " \t\n";
	int dim = CB_FLD_NUM;
	uint32_t temp;

	s = str;
	for (i = 0; i != dim; i++, s = NULL) {
		in[i] = strtok_r(s, dlm, &sp);
		if (in[i] == NULL)
			return -EINVAL;
	}

	ret = parse_ipv4_net(in[CB_FLD_SRC_ADDR],
			&ntuple_filter->src_ip,
			&ntuple_filter->src_ip_mask);
	if (ret != 0) {
		flow_classify_log("failed to read source address/mask: %s\n",
				in[CB_FLD_SRC_ADDR]);
		return ret;
	}

	ret = parse_ipv4_net(in[CB_FLD_DST_ADDR],
			&ntuple_filter->dst_ip,
			&ntuple_filter->dst_ip_mask);
	if (ret != 0) {
		flow_classify_log("failed to read destination address/mask: %s\n",
				in[CB_FLD_DST_ADDR]);
		return ret;
	}

	if (get_cb_field(&in[CB_FLD_SRC_PORT], &temp, 0, UINT16_MAX, 0))
		return -EINVAL;
	ntuple_filter->src_port = (uint16_t)temp;

	if (strncmp(in[CB_FLD_SRC_PORT_DLM], cb_port_delim,
			sizeof(cb_port_delim)) != 0)
		return -EINVAL;

	if (get_cb_field(&in[CB_FLD_SRC_PORT_MASK], &temp, 0, UINT16_MAX, 0))
		return -EINVAL;
	ntuple_filter->src_port_mask = (uint16_t)temp;

	if (get_cb_field(&in[CB_FLD_DST_PORT], &temp, 0, UINT16_MAX, 0))
		return -EINVAL;
	ntuple_filter->dst_port = (uint16_t)temp;

	if (strncmp(in[CB_FLD_DST_PORT_DLM], cb_port_delim,
			sizeof(cb_port_delim)) != 0)
		return -EINVAL;

	if (get_cb_field(&in[CB_FLD_DST_PORT_MASK], &temp, 0, UINT16_MAX, 0))
		return -EINVAL;
	ntuple_filter->dst_port_mask = (uint16_t)temp;

	if (get_cb_field(&in[CB_FLD_PROTO], &temp, 0, UINT8_MAX, '/'))
		return -EINVAL;
	ntuple_filter->proto = (uint8_t)temp;

	if (get_cb_field(&in[CB_FLD_PROTO], &temp, 0, UINT8_MAX, 0))
		return -EINVAL;
	ntuple_filter->proto_mask = (uint8_t)temp;

	if (get_cb_field(&in[CB_FLD_PRIORITY], &temp, 0, UINT16_MAX, 0))
		return -EINVAL;
	ntuple_filter->priority = (uint16_t)temp;
	if (ntuple_filter->priority > FLOW_CLASSIFY_MAX_PRIORITY)
		ret = -EINVAL;

	return ret;
}

/* Bypass comment and empty lines */
static inline int
is_bypass_line(char *buff)
{
	int i = 0;

	/* comment line */
	if (buff[0] == COMMENT_LEAD_CHAR)
		return 1;
	/* empty line */
	while (buff[i] != '\0') {
		if (!isspace(buff[i]))
			return 0;
		i++;
	}
	return 1;
}

static uint32_t
convert_depth_to_bitmask(uint32_t depth_val)
{
	uint32_t bitmask = 0;
	int i, j;

	for (i = depth_val, j = 0; i > 0; i--, j++)
		bitmask |= (1 << (31 - j));
	return bitmask;
}
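
/*
 * convert_depth_to_bitmask() turns a CIDR prefix length into a host-order
 * netmask by setting the top depth_val bits: for example, a depth of 24
 * yields 0xffffff00 (255.255.255.0), and a depth of 0 yields 0 (match all).
 */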

static int
add_classify_rule(struct rte_eth_ntuple_filter *ntuple_filter,
		struct flow_classifier *cls_app)
{
	int ret = -1;
	int key_found;
	struct rte_flow_error error;
	struct rte_flow_item_ipv4 ipv4_spec;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item ipv4_udp_item;
	struct rte_flow_item ipv4_tcp_item;
	struct rte_flow_item ipv4_sctp_item;
	struct rte_flow_item_udp udp_spec;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item udp_item;
	struct rte_flow_item_tcp tcp_spec;
	struct rte_flow_item_tcp tcp_mask;
	struct rte_flow_item tcp_item;
	struct rte_flow_item_sctp sctp_spec;
	struct rte_flow_item_sctp sctp_mask;
	struct rte_flow_item sctp_item;
	struct rte_flow_item pattern_ipv4_5tuple[4];
	struct rte_flow_classify_rule *rule;
	uint8_t ipv4_proto;

	if (num_classify_rules >= MAX_NUM_CLASSIFY) {
		printf("\nINFO: classify rule capacity %d reached\n",
			num_classify_rules);
		return ret;
	}

	/* set up parameters for validate and add */
	memset(&ipv4_spec, 0, sizeof(ipv4_spec));
	ipv4_spec.hdr.next_proto_id = ntuple_filter->proto;
	ipv4_spec.hdr.src_addr = ntuple_filter->src_ip;
	ipv4_spec.hdr.dst_addr = ntuple_filter->dst_ip;
	ipv4_proto = ipv4_spec.hdr.next_proto_id;

	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4_mask.hdr.next_proto_id = ntuple_filter->proto_mask;
	ipv4_mask.hdr.src_addr = ntuple_filter->src_ip_mask;
	ipv4_mask.hdr.src_addr =
		convert_depth_to_bitmask(ipv4_mask.hdr.src_addr);
	ipv4_mask.hdr.dst_addr = ntuple_filter->dst_ip_mask;
	ipv4_mask.hdr.dst_addr =
		convert_depth_to_bitmask(ipv4_mask.hdr.dst_addr);

	switch (ipv4_proto) {
	case IPPROTO_UDP:
		ipv4_udp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		ipv4_udp_item.spec = &ipv4_spec;
		ipv4_udp_item.mask = &ipv4_mask;
		ipv4_udp_item.last = NULL;

		udp_spec.hdr.src_port = ntuple_filter->src_port;
		udp_spec.hdr.dst_port = ntuple_filter->dst_port;
		udp_spec.hdr.dgram_len = 0;
		udp_spec.hdr.dgram_cksum = 0;

		udp_mask.hdr.src_port = ntuple_filter->src_port_mask;
		udp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;
		udp_mask.hdr.dgram_len = 0;
		udp_mask.hdr.dgram_cksum = 0;

		udp_item.type = RTE_FLOW_ITEM_TYPE_UDP;
		udp_item.spec = &udp_spec;
		udp_item.mask = &udp_mask;
		udp_item.last = NULL;

		attr.priority = ntuple_filter->priority;
		pattern_ipv4_5tuple[1] = ipv4_udp_item;
		pattern_ipv4_5tuple[2] = udp_item;
		break;
	case IPPROTO_TCP:
		ipv4_tcp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		ipv4_tcp_item.spec = &ipv4_spec;
		ipv4_tcp_item.mask = &ipv4_mask;
		ipv4_tcp_item.last = NULL;

		memset(&tcp_spec, 0, sizeof(tcp_spec));
		tcp_spec.hdr.src_port = ntuple_filter->src_port;
		tcp_spec.hdr.dst_port = ntuple_filter->dst_port;

		memset(&tcp_mask, 0, sizeof(tcp_mask));
		tcp_mask.hdr.src_port = ntuple_filter->src_port_mask;
		tcp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;

		tcp_item.type = RTE_FLOW_ITEM_TYPE_TCP;
		tcp_item.spec = &tcp_spec;
		tcp_item.mask = &tcp_mask;
		tcp_item.last = NULL;

		attr.priority = ntuple_filter->priority;
		pattern_ipv4_5tuple[1] = ipv4_tcp_item;
		pattern_ipv4_5tuple[2] = tcp_item;
		break;
	case IPPROTO_SCTP:
		ipv4_sctp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		ipv4_sctp_item.spec = &ipv4_spec;
		ipv4_sctp_item.mask = &ipv4_mask;
		ipv4_sctp_item.last = NULL;

		sctp_spec.hdr.src_port = ntuple_filter->src_port;
		sctp_spec.hdr.dst_port = ntuple_filter->dst_port;
		sctp_spec.hdr.cksum = 0;
		sctp_spec.hdr.tag = 0;

		sctp_mask.hdr.src_port = ntuple_filter->src_port_mask;
		sctp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;
		sctp_mask.hdr.cksum = 0;
		sctp_mask.hdr.tag = 0;

		sctp_item.type = RTE_FLOW_ITEM_TYPE_SCTP;
		sctp_item.spec = &sctp_spec;
		sctp_item.mask = &sctp_mask;
		sctp_item.last = NULL;

		attr.priority = ntuple_filter->priority;
		pattern_ipv4_5tuple[1] = ipv4_sctp_item;
		pattern_ipv4_5tuple[2] = sctp_item;
		break;
	default:
		return ret;
	}

	attr.ingress = 1;
	pattern_ipv4_5tuple[0] = eth_item;
	pattern_ipv4_5tuple[3] = end_item;
	actions[0] = count_action;
	actions[1] = end_action;

	rule = rte_flow_classify_table_entry_add(
			cls_app->cls, cls_app->table_id[0], &key_found,
			&attr, pattern_ipv4_5tuple, actions, &error);
	if (rule == NULL) {
		printf("table entry add failed ipv4_proto = %u\n",
			ipv4_proto);
		ret = -1;
		return ret;
	}

	rules[num_classify_rules] = rule;
	num_classify_rules++;
	return 0;
}

static int
add_rules(const char *rule_path, struct flow_classifier *cls_app)
{
	FILE *fh;
	char buff[LINE_MAX];
	unsigned int i = 0;
	unsigned int total_num = 0;
	struct rte_eth_ntuple_filter ntuple_filter;
	int ret;

	fh = fopen(rule_path, "rb");
	if (fh == NULL)
		rte_exit(EXIT_FAILURE, "%s: fopen %s failed\n", __func__,
			rule_path);

	ret = fseek(fh, 0, SEEK_SET);
	if (ret)
		rte_exit(EXIT_FAILURE, "%s: fseek %d failed\n", __func__,
			ret);

	i = 0;
	while (fgets(buff, LINE_MAX, fh) != NULL) {
		i++;

		if (is_bypass_line(buff))
			continue;

		if (total_num >= FLOW_CLASSIFY_MAX_RULE_NUM - 1) {
			printf("\nINFO: classify rule capacity %d reached\n",
				total_num);
			break;
		}

		if (parse_ipv4_5tuple_rule(buff, &ntuple_filter) != 0)
			rte_exit(EXIT_FAILURE,
				"%s Line %u: parse rules error\n",
				rule_path, i);

		if (add_classify_rule(&ntuple_filter, cls_app) != 0)
			rte_exit(EXIT_FAILURE, "add rule error\n");

		total_num++;
	}

	fclose(fh);
	return 0;
}
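
/*
 * EAL arguments come first on the command line, application arguments
 * after "--". An illustrative invocation (core mask and file path are
 * examples only):
 *
 *   ./flow_classify -c 4 -n 4 -- --rule_ipv4=ipv4_rules_file.txt
 */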

/* display usage */
static void
print_usage(const char *prgname)
{
	printf("%s usage:\n", prgname);
	printf("[EAL options] -- --"OPTION_RULE_IPV4"=FILE: ");
	printf("specify the ipv4 rules file.\n");
	printf("Each rule occupies one line in the file.\n");
}

/* Parse the arguments given on the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{OPTION_RULE_IPV4, 1, 0, 0},
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "",
				lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* long options */
		case 0:
			if (!strncmp(lgopts[option_index].name,
					OPTION_RULE_IPV4,
					sizeof(OPTION_RULE_IPV4)))
				parm_config.rule_ipv4_name = optarg;
			break;
		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 1; /* reset getopt lib */
	return ret;
}

/*
 * The main function, which does initialization and calls the lcore_main
 * function.
 */
int
main(int argc, char *argv[])
{
	struct rte_mempool *mbuf_pool;
	uint8_t nb_ports;
	uint8_t portid;
	int ret;
	int socket_id;
	struct rte_table_acl_params table_acl_params;
	struct rte_flow_classify_table_params cls_table_params;
	struct flow_classifier *cls_app;
	struct rte_flow_classifier_params cls_params;
	uint32_t size;

	/* Initialize the Environment Abstraction Layer (EAL). */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

	argc -= ret;
	argv += ret;

	/* Parse application arguments (after the EAL ones). */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid flow_classify parameters\n");

	if (parm_config.rule_ipv4_name == NULL)
		rte_exit(EXIT_FAILURE, "No rule file specified, use --"
			OPTION_RULE_IPV4 "=FILE\n");

	/* Check that there is an even number of ports to send/receive on. */
	nb_ports = rte_eth_dev_count();
	if (nb_ports < 2 || (nb_ports & 1))
		rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");

	/* Create a new mempool in memory to hold the mbufs. */
	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS * nb_ports,
		MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* Initialize all ports. */
	for (portid = 0; portid < nb_ports; portid++)
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
				portid);

	if (rte_lcore_count() > 1)
		printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");

	socket_id = rte_eth_dev_socket_id(0);

	/* Memory allocation. */
	size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct flow_classifier_acl));
	cls_app = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (cls_app == NULL)
		rte_exit(EXIT_FAILURE, "Cannot allocate classifier memory\n");

	cls_params.name = "flow_classifier";
	cls_params.socket_id = socket_id;
	cls_params.type = RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL;

	cls_app->cls = rte_flow_classifier_create(&cls_params);
	if (cls_app->cls == NULL) {
		rte_free(cls_app);
		rte_exit(EXIT_FAILURE, "Cannot create classifier\n");
	}

	/* initialise ACL table params */
	table_acl_params.name = "table_acl_ipv4_5tuple";
	table_acl_params.n_rules = FLOW_CLASSIFY_MAX_RULE_NUM;
	table_acl_params.n_rule_fields = RTE_DIM(ipv4_defs);
	memcpy(table_acl_params.field_format, ipv4_defs, sizeof(ipv4_defs));

	/* initialise table create params */
	cls_table_params.ops = &rte_table_acl_ops;
	cls_table_params.arg_create = &table_acl_params;

	ret = rte_flow_classify_table_create(cls_app->cls, &cls_table_params,
			&cls_app->table_id[0]);
	if (ret) {
		rte_flow_classifier_free(cls_app->cls);
		rte_free(cls_app);
		rte_exit(EXIT_FAILURE, "Failed to create classifier table\n");
	}

	/* Read the file of IPv4 5 tuple rules and initialize parameters
	 * for the rte_flow_classify_validate and
	 * rte_flow_classify_table_entry_add APIs.
	 */
	if (add_rules(parm_config.rule_ipv4_name, cls_app)) {
		rte_flow_classifier_free(cls_app->cls);
		rte_free(cls_app);
		rte_exit(EXIT_FAILURE, "Failed to add rules\n");
	}

	/* Call lcore_main on the master core only. */
	lcore_main(cls_app);

	return 0;
}