1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2014-2021 Broadcom 3 * All rights reserved. 4 */ 5 6 #include <sys/queue.h> 7 8 #include <rte_log.h> 9 #include <rte_malloc.h> 10 #include <rte_flow.h> 11 #include <rte_flow_driver.h> 12 #include <rte_tailq.h> 13 #include <rte_alarm.h> 14 #include <rte_cycles.h> 15 16 #include "bnxt.h" 17 #include "bnxt_filter.h" 18 #include "bnxt_hwrm.h" 19 #include "bnxt_ring.h" 20 #include "bnxt_rxq.h" 21 #include "bnxt_rxr.h" 22 #include "bnxt_vnic.h" 23 #include "hsi_struct_def_dpdk.h" 24 25 static int 26 bnxt_flow_args_validate(const struct rte_flow_attr *attr, 27 const struct rte_flow_item pattern[], 28 const struct rte_flow_action actions[], 29 struct rte_flow_error *error) 30 { 31 if (!pattern) { 32 rte_flow_error_set(error, 33 EINVAL, 34 RTE_FLOW_ERROR_TYPE_ITEM_NUM, 35 NULL, 36 "NULL pattern."); 37 return -rte_errno; 38 } 39 40 if (!actions) { 41 rte_flow_error_set(error, 42 EINVAL, 43 RTE_FLOW_ERROR_TYPE_ACTION_NUM, 44 NULL, 45 "NULL action."); 46 return -rte_errno; 47 } 48 49 if (!attr) { 50 rte_flow_error_set(error, 51 EINVAL, 52 RTE_FLOW_ERROR_TYPE_ATTR, 53 NULL, 54 "NULL attribute."); 55 return -rte_errno; 56 } 57 58 return 0; 59 } 60 61 static const struct rte_flow_item * 62 bnxt_flow_non_void_item(const struct rte_flow_item *cur) 63 { 64 while (1) { 65 if (cur->type != RTE_FLOW_ITEM_TYPE_VOID) 66 return cur; 67 cur++; 68 } 69 } 70 71 static const struct rte_flow_action * 72 bnxt_flow_non_void_action(const struct rte_flow_action *cur) 73 { 74 while (1) { 75 if (cur->type != RTE_FLOW_ACTION_TYPE_VOID) 76 return cur; 77 cur++; 78 } 79 } 80 81 static int 82 bnxt_filter_type_check(const struct rte_flow_item pattern[], 83 struct rte_flow_error *error) 84 { 85 const struct rte_flow_item *item = 86 bnxt_flow_non_void_item(pattern); 87 int use_ntuple = 1; 88 bool has_vlan = 0; 89 90 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 91 switch (item->type) { 92 case RTE_FLOW_ITEM_TYPE_ANY: 93 case RTE_FLOW_ITEM_TYPE_ETH: 94 use_ntuple = 0; 95 break; 96 case RTE_FLOW_ITEM_TYPE_VLAN: 97 use_ntuple = 0; 98 has_vlan = 1; 99 break; 100 case RTE_FLOW_ITEM_TYPE_IPV4: 101 case RTE_FLOW_ITEM_TYPE_IPV6: 102 case RTE_FLOW_ITEM_TYPE_TCP: 103 case RTE_FLOW_ITEM_TYPE_UDP: 104 /* FALLTHROUGH */ 105 /* need ntuple match, reset exact match */ 106 use_ntuple |= 1; 107 break; 108 default: 109 PMD_DRV_LOG(DEBUG, "Unknown Flow type\n"); 110 use_ntuple |= 0; 111 } 112 item++; 113 } 114 115 if (has_vlan && use_ntuple) { 116 PMD_DRV_LOG(ERR, 117 "VLAN flow cannot use NTUPLE filter\n"); 118 rte_flow_error_set(error, EINVAL, 119 RTE_FLOW_ERROR_TYPE_ITEM, 120 item, 121 "Cannot use VLAN with NTUPLE"); 122 return -rte_errno; 123 } 124 125 return use_ntuple; 126 } 127 128 static int 129 bnxt_validate_and_parse_flow_type(struct bnxt *bp, 130 const struct rte_flow_attr *attr, 131 const struct rte_flow_item pattern[], 132 struct rte_flow_error *error, 133 struct bnxt_filter_info *filter) 134 { 135 const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern); 136 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask; 137 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; 138 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; 139 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; 140 const struct rte_flow_item_udp *udp_spec, *udp_mask; 141 const struct rte_flow_item_eth *eth_spec, *eth_mask; 142 const struct rte_ether_addr *dst, *src; 143 const struct rte_flow_item_nvgre *nvgre_spec; 144 const struct rte_flow_item_nvgre *nvgre_mask; 145 const struct 
rte_flow_item_gre *gre_spec;
	const struct rte_flow_item_gre *gre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	const struct rte_flow_item_vf *vf_spec;
	uint32_t tenant_id_be = 0, valid_flags = 0;
	bool vni_masked = 0;
	bool tni_masked = 0;
	uint32_t en_ethertype;
	uint8_t inner = 0;
	uint32_t vf = 0;
	uint32_t en = 0;
	int use_ntuple;
	int dflt_vnic;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	if (use_ntuple < 0)
		return use_ntuple;
	PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);

	filter->filter_type = use_ntuple ?
		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_L2_FILTER;
	en_ethertype = use_ntuple ?
		NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
		EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->last) {
			/* last or range is NOT supported as match criteria */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "No support for range");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ANY:
			inner =
			((const struct rte_flow_item_any *)item->spec)->num > 3;
			if (inner)
				PMD_DRV_LOG(DEBUG, "Parse inner header\n");
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			if (!item->spec)
				break;

			eth_spec = item->spec;

			if (item->mask)
				eth_mask = item->mask;
			else
				eth_mask = &rte_flow_item_eth_mask;

			/* Source MAC address mask cannot be partially set.
			 * Should be All 0's or all 1's.
			 * Destination MAC address mask must not be partially
			 * set. Should be all 1's or all 0's.
			 */
			if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
			    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "MAC_addr mask not valid");
				return -rte_errno;
			}

			/* Mask is not allowed. Only exact matches are */
			if (eth_mask->type &&
			    eth_mask->type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "ethertype mask not valid");
				return -rte_errno;
			}

			if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
				dst = &eth_spec->dst;
				if (!rte_is_valid_assigned_ether_addr(dst)) {
					rte_flow_error_set(error,
							   EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "DMAC is invalid");
					PMD_DRV_LOG(ERR,
						    "DMAC is invalid!\n");
					return -rte_errno;
				}
				rte_memcpy(filter->dst_macaddr,
					   &eth_spec->dst, RTE_ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
				valid_flags |= inner ?
					BNXT_FLOW_L2_INNER_DST_VALID_FLAG :
					BNXT_FLOW_L2_DST_VALID_FLAG;
				filter->priority = attr->priority;
				PMD_DRV_LOG(DEBUG,
					    "Creating a priority flow\n");
			}
			if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
				src = &eth_spec->src;
				if (!rte_is_valid_assigned_ether_addr(src)) {
					rte_flow_error_set(error,
							   EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "SMAC is invalid");
					PMD_DRV_LOG(ERR,
						    "SMAC is invalid!\n");
					return -rte_errno;
				}
				rte_memcpy(filter->src_macaddr,
					   &eth_spec->src, RTE_ETHER_ADDR_LEN);
				en |= use_ntuple ?
267 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR : 268 EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR; 269 valid_flags |= inner ? 270 BNXT_FLOW_L2_INNER_SRC_VALID_FLAG : 271 BNXT_FLOW_L2_SRC_VALID_FLAG; 272 } /* 273 * else { 274 * PMD_DRV_LOG(ERR, "Handle this condition\n"); 275 * } 276 */ 277 if (eth_mask->type) { 278 filter->ethertype = 279 rte_be_to_cpu_16(eth_spec->type); 280 en |= en_ethertype; 281 } 282 if (inner) 283 valid_flags |= BNXT_FLOW_PARSE_INNER_FLAG; 284 285 break; 286 case RTE_FLOW_ITEM_TYPE_VLAN: 287 vlan_spec = item->spec; 288 289 if (item->mask) 290 vlan_mask = item->mask; 291 else 292 vlan_mask = &rte_flow_item_vlan_mask; 293 294 if (en & en_ethertype) { 295 rte_flow_error_set(error, EINVAL, 296 RTE_FLOW_ERROR_TYPE_ITEM, 297 item, 298 "VLAN TPID matching is not" 299 " supported"); 300 return -rte_errno; 301 } 302 if (vlan_mask->tci && 303 vlan_mask->tci == RTE_BE16(0x0fff)) { 304 /* Only the VLAN ID can be matched. */ 305 filter->l2_ovlan = 306 rte_be_to_cpu_16(vlan_spec->tci & 307 RTE_BE16(0x0fff)); 308 en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID; 309 } else { 310 rte_flow_error_set(error, 311 EINVAL, 312 RTE_FLOW_ERROR_TYPE_ITEM, 313 item, 314 "VLAN mask is invalid"); 315 return -rte_errno; 316 } 317 if (vlan_mask->inner_type && 318 vlan_mask->inner_type != RTE_BE16(0xffff)) { 319 rte_flow_error_set(error, EINVAL, 320 RTE_FLOW_ERROR_TYPE_ITEM, 321 item, 322 "inner ethertype mask not" 323 " valid"); 324 return -rte_errno; 325 } 326 if (vlan_mask->inner_type) { 327 filter->ethertype = 328 rte_be_to_cpu_16(vlan_spec->inner_type); 329 en |= en_ethertype; 330 } 331 332 break; 333 case RTE_FLOW_ITEM_TYPE_IPV4: 334 /* If mask is not involved, we could use EM filters. */ 335 ipv4_spec = item->spec; 336 337 if (!item->spec) 338 break; 339 340 if (item->mask) 341 ipv4_mask = item->mask; 342 else 343 ipv4_mask = &rte_flow_item_ipv4_mask; 344 345 /* Only IP DST and SRC fields are maskable. */ 346 if (ipv4_mask->hdr.version_ihl || 347 ipv4_mask->hdr.type_of_service || 348 ipv4_mask->hdr.total_length || 349 ipv4_mask->hdr.packet_id || 350 ipv4_mask->hdr.fragment_offset || 351 ipv4_mask->hdr.time_to_live || 352 ipv4_mask->hdr.next_proto_id || 353 ipv4_mask->hdr.hdr_checksum) { 354 rte_flow_error_set(error, 355 EINVAL, 356 RTE_FLOW_ERROR_TYPE_ITEM, 357 item, 358 "Invalid IPv4 mask."); 359 return -rte_errno; 360 } 361 362 filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr; 363 filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr; 364 365 if (use_ntuple) 366 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | 367 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 368 else 369 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR | 370 EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR; 371 372 if (ipv4_mask->hdr.src_addr) { 373 filter->src_ipaddr_mask[0] = 374 ipv4_mask->hdr.src_addr; 375 en |= !use_ntuple ? 0 : 376 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 377 } 378 379 if (ipv4_mask->hdr.dst_addr) { 380 filter->dst_ipaddr_mask[0] = 381 ipv4_mask->hdr.dst_addr; 382 en |= !use_ntuple ? 0 : 383 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 384 } 385 386 filter->ip_addr_type = use_ntuple ? 
387 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 : 388 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 389 390 if (ipv4_spec->hdr.next_proto_id) { 391 filter->ip_protocol = 392 ipv4_spec->hdr.next_proto_id; 393 if (use_ntuple) 394 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 395 else 396 en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO; 397 } 398 break; 399 case RTE_FLOW_ITEM_TYPE_IPV6: 400 ipv6_spec = item->spec; 401 402 if (!item->spec) 403 break; 404 405 if (item->mask) 406 ipv6_mask = item->mask; 407 else 408 ipv6_mask = &rte_flow_item_ipv6_mask; 409 410 /* Only IP DST and SRC fields are maskable. */ 411 if (ipv6_mask->hdr.vtc_flow || 412 ipv6_mask->hdr.payload_len || 413 ipv6_mask->hdr.proto || 414 ipv6_mask->hdr.hop_limits) { 415 rte_flow_error_set(error, 416 EINVAL, 417 RTE_FLOW_ERROR_TYPE_ITEM, 418 item, 419 "Invalid IPv6 mask."); 420 return -rte_errno; 421 } 422 423 if (use_ntuple) 424 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | 425 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 426 else 427 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR | 428 EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR; 429 430 rte_memcpy(filter->src_ipaddr, 431 ipv6_spec->hdr.src_addr, 16); 432 rte_memcpy(filter->dst_ipaddr, 433 ipv6_spec->hdr.dst_addr, 16); 434 435 if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr, 436 16)) { 437 rte_memcpy(filter->src_ipaddr_mask, 438 ipv6_mask->hdr.src_addr, 16); 439 en |= !use_ntuple ? 0 : 440 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 441 } 442 443 if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr, 444 16)) { 445 rte_memcpy(filter->dst_ipaddr_mask, 446 ipv6_mask->hdr.dst_addr, 16); 447 en |= !use_ntuple ? 0 : 448 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 449 } 450 451 filter->ip_addr_type = use_ntuple ? 452 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 : 453 EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 454 break; 455 case RTE_FLOW_ITEM_TYPE_TCP: 456 tcp_spec = item->spec; 457 458 if (!item->spec) 459 break; 460 461 if (item->mask) 462 tcp_mask = item->mask; 463 else 464 tcp_mask = &rte_flow_item_tcp_mask; 465 466 /* Check TCP mask. Only DST & SRC ports are maskable */ 467 if (tcp_mask->hdr.sent_seq || 468 tcp_mask->hdr.recv_ack || 469 tcp_mask->hdr.data_off || 470 tcp_mask->hdr.tcp_flags || 471 tcp_mask->hdr.rx_win || 472 tcp_mask->hdr.cksum || 473 tcp_mask->hdr.tcp_urp) { 474 rte_flow_error_set(error, 475 EINVAL, 476 RTE_FLOW_ERROR_TYPE_ITEM, 477 item, 478 "Invalid TCP mask"); 479 return -rte_errno; 480 } 481 482 filter->src_port = tcp_spec->hdr.src_port; 483 filter->dst_port = tcp_spec->hdr.dst_port; 484 485 if (use_ntuple) 486 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | 487 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 488 else 489 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT | 490 EM_FLOW_ALLOC_INPUT_EN_DST_PORT; 491 492 if (tcp_mask->hdr.dst_port) { 493 filter->dst_port_mask = tcp_mask->hdr.dst_port; 494 en |= !use_ntuple ? 0 : 495 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 496 } 497 498 if (tcp_mask->hdr.src_port) { 499 filter->src_port_mask = tcp_mask->hdr.src_port; 500 en |= !use_ntuple ? 
0 : 501 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 502 } 503 break; 504 case RTE_FLOW_ITEM_TYPE_UDP: 505 udp_spec = item->spec; 506 507 if (!item->spec) 508 break; 509 510 if (item->mask) 511 udp_mask = item->mask; 512 else 513 udp_mask = &rte_flow_item_udp_mask; 514 515 if (udp_mask->hdr.dgram_len || 516 udp_mask->hdr.dgram_cksum) { 517 rte_flow_error_set(error, 518 EINVAL, 519 RTE_FLOW_ERROR_TYPE_ITEM, 520 item, 521 "Invalid UDP mask"); 522 return -rte_errno; 523 } 524 525 filter->src_port = udp_spec->hdr.src_port; 526 filter->dst_port = udp_spec->hdr.dst_port; 527 528 if (use_ntuple) 529 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | 530 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 531 else 532 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT | 533 EM_FLOW_ALLOC_INPUT_EN_DST_PORT; 534 535 if (udp_mask->hdr.dst_port) { 536 filter->dst_port_mask = udp_mask->hdr.dst_port; 537 en |= !use_ntuple ? 0 : 538 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 539 } 540 541 if (udp_mask->hdr.src_port) { 542 filter->src_port_mask = udp_mask->hdr.src_port; 543 en |= !use_ntuple ? 0 : 544 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 545 } 546 break; 547 case RTE_FLOW_ITEM_TYPE_VXLAN: 548 vxlan_spec = item->spec; 549 vxlan_mask = item->mask; 550 /* Check if VXLAN item is used to describe protocol. 551 * If yes, both spec and mask should be NULL. 552 * If no, both spec and mask shouldn't be NULL. 553 */ 554 if ((!vxlan_spec && vxlan_mask) || 555 (vxlan_spec && !vxlan_mask)) { 556 rte_flow_error_set(error, 557 EINVAL, 558 RTE_FLOW_ERROR_TYPE_ITEM, 559 item, 560 "Invalid VXLAN item"); 561 return -rte_errno; 562 } 563 564 if (!vxlan_spec && !vxlan_mask) { 565 filter->tunnel_type = 566 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 567 break; 568 } 569 570 if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] || 571 vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] || 572 vxlan_spec->flags != 0x8) { 573 rte_flow_error_set(error, 574 EINVAL, 575 RTE_FLOW_ERROR_TYPE_ITEM, 576 item, 577 "Invalid VXLAN item"); 578 return -rte_errno; 579 } 580 581 /* Check if VNI is masked. */ 582 if (vxlan_mask != NULL) { 583 vni_masked = 584 !!memcmp(vxlan_mask->vni, vni_mask, 585 RTE_DIM(vni_mask)); 586 if (vni_masked) { 587 rte_flow_error_set 588 (error, 589 EINVAL, 590 RTE_FLOW_ERROR_TYPE_ITEM, 591 item, 592 "Invalid VNI mask"); 593 return -rte_errno; 594 } 595 596 rte_memcpy(((uint8_t *)&tenant_id_be + 1), 597 vxlan_spec->vni, 3); 598 filter->vni = 599 rte_be_to_cpu_32(tenant_id_be); 600 filter->tunnel_type = 601 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 602 } 603 break; 604 case RTE_FLOW_ITEM_TYPE_NVGRE: 605 nvgre_spec = item->spec; 606 nvgre_mask = item->mask; 607 /* Check if NVGRE item is used to describe protocol. 608 * If yes, both spec and mask should be NULL. 609 * If no, both spec and mask shouldn't be NULL. 
610 */ 611 if ((!nvgre_spec && nvgre_mask) || 612 (nvgre_spec && !nvgre_mask)) { 613 rte_flow_error_set(error, 614 EINVAL, 615 RTE_FLOW_ERROR_TYPE_ITEM, 616 item, 617 "Invalid NVGRE item"); 618 return -rte_errno; 619 } 620 621 if (!nvgre_spec && !nvgre_mask) { 622 filter->tunnel_type = 623 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; 624 break; 625 } 626 627 if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 || 628 nvgre_spec->protocol != 0x6558) { 629 rte_flow_error_set(error, 630 EINVAL, 631 RTE_FLOW_ERROR_TYPE_ITEM, 632 item, 633 "Invalid NVGRE item"); 634 return -rte_errno; 635 } 636 637 if (nvgre_spec && nvgre_mask) { 638 tni_masked = 639 !!memcmp(nvgre_mask->tni, tni_mask, 640 RTE_DIM(tni_mask)); 641 if (tni_masked) { 642 rte_flow_error_set 643 (error, 644 EINVAL, 645 RTE_FLOW_ERROR_TYPE_ITEM, 646 item, 647 "Invalid TNI mask"); 648 return -rte_errno; 649 } 650 rte_memcpy(((uint8_t *)&tenant_id_be + 1), 651 nvgre_spec->tni, 3); 652 filter->vni = 653 rte_be_to_cpu_32(tenant_id_be); 654 filter->tunnel_type = 655 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; 656 } 657 break; 658 659 case RTE_FLOW_ITEM_TYPE_GRE: 660 gre_spec = (const struct rte_flow_item_gre *)item->spec; 661 gre_mask = (const struct rte_flow_item_gre *)item->mask; 662 663 /* 664 *Check if GRE item is used to describe protocol. 665 * If yes, both spec and mask should be NULL. 666 * If no, both spec and mask shouldn't be NULL. 667 */ 668 if (!!gre_spec ^ !!gre_mask) { 669 rte_flow_error_set(error, EINVAL, 670 RTE_FLOW_ERROR_TYPE_ITEM, 671 item, 672 "Invalid GRE item"); 673 return -rte_errno; 674 } 675 676 if (!gre_spec && !gre_mask) { 677 filter->tunnel_type = 678 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE; 679 break; 680 } 681 break; 682 683 case RTE_FLOW_ITEM_TYPE_VF: 684 vf_spec = item->spec; 685 vf = vf_spec->id; 686 if (!BNXT_PF(bp)) { 687 rte_flow_error_set(error, 688 EINVAL, 689 RTE_FLOW_ERROR_TYPE_ITEM, 690 item, 691 "Configuring on a VF!"); 692 return -rte_errno; 693 } 694 695 if (vf >= bp->pdev->max_vfs) { 696 rte_flow_error_set(error, 697 EINVAL, 698 RTE_FLOW_ERROR_TYPE_ITEM, 699 item, 700 "Incorrect VF id!"); 701 return -rte_errno; 702 } 703 704 if (!attr->transfer) { 705 rte_flow_error_set(error, 706 ENOTSUP, 707 RTE_FLOW_ERROR_TYPE_ITEM, 708 item, 709 "Matching VF traffic without" 710 " affecting it (transfer attribute)" 711 " is unsupported"); 712 return -rte_errno; 713 } 714 715 filter->mirror_vnic_id = 716 dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf); 717 if (dflt_vnic < 0) { 718 /* This simply indicates there's no driver 719 * loaded. This is not an error. 720 */ 721 rte_flow_error_set 722 (error, 723 EINVAL, 724 RTE_FLOW_ERROR_TYPE_ITEM, 725 item, 726 "Unable to get default VNIC for VF"); 727 return -rte_errno; 728 } 729 730 filter->mirror_vnic_id = dflt_vnic; 731 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID; 732 break; 733 default: 734 break; 735 } 736 item++; 737 } 738 filter->enables = en; 739 filter->valid_flags = valid_flags; 740 741 /* Items parsed but no filter to create in HW. 
*/ 742 if (filter->enables == 0 && filter->valid_flags == 0) 743 filter->filter_type = HWRM_CFA_CONFIG; 744 745 return 0; 746 } 747 748 /* Parse attributes */ 749 static int 750 bnxt_flow_parse_attr(const struct rte_flow_attr *attr, 751 struct rte_flow_error *error) 752 { 753 /* Must be input direction */ 754 if (!attr->ingress) { 755 rte_flow_error_set(error, 756 EINVAL, 757 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, 758 attr, 759 "Only support ingress."); 760 return -rte_errno; 761 } 762 763 /* Not supported */ 764 if (attr->egress) { 765 rte_flow_error_set(error, 766 EINVAL, 767 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, 768 attr, 769 "No support for egress."); 770 return -rte_errno; 771 } 772 773 return 0; 774 } 775 776 static struct bnxt_filter_info * 777 bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf) 778 { 779 struct bnxt_filter_info *mf, *f0; 780 struct bnxt_vnic_info *vnic0; 781 int i; 782 783 vnic0 = BNXT_GET_DEFAULT_VNIC(bp); 784 f0 = STAILQ_FIRST(&vnic0->filter); 785 786 /* This flow has same DST MAC as the port/l2 filter. */ 787 if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0) 788 return f0; 789 790 for (i = bp->max_vnics - 1; i >= 0; i--) { 791 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 792 793 if (vnic->fw_vnic_id == INVALID_VNIC_ID) 794 continue; 795 796 STAILQ_FOREACH(mf, &vnic->filter, next) { 797 798 if (mf->matching_l2_fltr_ptr) 799 continue; 800 801 if (mf->ethertype == nf->ethertype && 802 mf->l2_ovlan == nf->l2_ovlan && 803 mf->l2_ovlan_mask == nf->l2_ovlan_mask && 804 mf->l2_ivlan == nf->l2_ivlan && 805 mf->l2_ivlan_mask == nf->l2_ivlan_mask && 806 !memcmp(mf->src_macaddr, nf->src_macaddr, 807 RTE_ETHER_ADDR_LEN) && 808 !memcmp(mf->dst_macaddr, nf->dst_macaddr, 809 RTE_ETHER_ADDR_LEN)) 810 return mf; 811 } 812 } 813 return NULL; 814 } 815 816 static struct bnxt_filter_info * 817 bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, 818 struct bnxt_vnic_info *vnic) 819 { 820 struct bnxt_filter_info *filter1; 821 int rc; 822 823 /* Alloc new L2 filter. 824 * This flow needs MAC filter which does not match any existing 825 * L2 filters. 826 */ 827 filter1 = bnxt_get_unused_filter(bp); 828 if (filter1 == NULL) 829 return NULL; 830 831 memcpy(filter1, nf, sizeof(*filter1)); 832 833 filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE; 834 filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; 835 if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG || 836 nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) { 837 filter1->flags |= 838 HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 839 PMD_DRV_LOG(DEBUG, "Create Outer filter\n"); 840 } 841 842 if (nf->filter_type == HWRM_CFA_L2_FILTER && 843 (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG || 844 nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) { 845 PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n"); 846 filter1->flags |= 847 HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID; 848 memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN); 849 } else { 850 PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n"); 851 memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN); 852 } 853 854 if (nf->priority && 855 (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG || 856 nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) { 857 /* Tell the FW where to place the filter in the table. 
*/ 858 if (nf->priority > 65535) { 859 filter1->pri_hint = 860 HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER; 861 /* This will place the filter in TCAM */ 862 filter1->l2_filter_id_hint = (uint64_t)-1; 863 } 864 } 865 866 if (nf->valid_flags & (BNXT_FLOW_L2_DST_VALID_FLAG | 867 BNXT_FLOW_L2_SRC_VALID_FLAG | 868 BNXT_FLOW_L2_INNER_SRC_VALID_FLAG | 869 BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) { 870 filter1->enables = 871 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | 872 L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK; 873 memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN); 874 } 875 876 if (nf->valid_flags & BNXT_FLOW_L2_DROP_FLAG) { 877 filter1->flags |= 878 HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP; 879 if (nf->ethertype == RTE_ETHER_TYPE_IPV4) { 880 /* Num VLANs for drop filter will/should be 0. 881 * If the req is memset to 0, then the count will 882 * be automatically set to 0. 883 */ 884 if (nf->valid_flags & BNXT_FLOW_PARSE_INNER_FLAG) { 885 filter1->enables |= 886 L2_FILTER_ALLOC_INPUT_EN_T_NUM_VLANS; 887 } else { 888 filter1->enables |= 889 L2_FILTER_ALLOC_INPUT_EN_NUM_VLANS; 890 filter1->flags |= 891 HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 892 } 893 } 894 } 895 896 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, 897 filter1); 898 if (rc) { 899 bnxt_free_filter(bp, filter1); 900 return NULL; 901 } 902 return filter1; 903 } 904 905 struct bnxt_filter_info * 906 bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, 907 struct bnxt_vnic_info *vnic) 908 { 909 struct bnxt_filter_info *l2_filter = NULL; 910 911 l2_filter = bnxt_find_matching_l2_filter(bp, nf); 912 if (l2_filter) { 913 l2_filter->l2_ref_cnt++; 914 } else { 915 l2_filter = bnxt_create_l2_filter(bp, nf, vnic); 916 if (l2_filter) { 917 STAILQ_INSERT_TAIL(&vnic->filter, l2_filter, next); 918 l2_filter->vnic = vnic; 919 } 920 } 921 nf->matching_l2_fltr_ptr = l2_filter; 922 923 return l2_filter; 924 } 925 926 static void bnxt_vnic_cleanup(struct bnxt *bp, struct bnxt_vnic_info *vnic) 927 { 928 if (vnic->rx_queue_cnt > 1) 929 bnxt_hwrm_vnic_ctx_free(bp, vnic); 930 931 bnxt_hwrm_vnic_free(bp, vnic); 932 933 rte_free(vnic->fw_grp_ids); 934 vnic->fw_grp_ids = NULL; 935 936 vnic->rx_queue_cnt = 0; 937 } 938 939 static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic, 940 const struct rte_flow_action *act, 941 struct rte_flow_error *error) 942 { 943 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 944 uint64_t rx_offloads = dev_conf->rxmode.offloads; 945 int rc; 946 947 if (bp->nr_vnics > bp->max_vnics - 1) 948 return rte_flow_error_set(error, EINVAL, 949 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 950 NULL, 951 "Group id is invalid"); 952 953 rc = bnxt_vnic_grp_alloc(bp, vnic); 954 if (rc) 955 return rte_flow_error_set(error, -rc, 956 RTE_FLOW_ERROR_TYPE_ACTION, 957 act, 958 "Failed to alloc VNIC group"); 959 960 rc = bnxt_hwrm_vnic_alloc(bp, vnic); 961 if (rc) { 962 rte_flow_error_set(error, -rc, 963 RTE_FLOW_ERROR_TYPE_ACTION, 964 act, 965 "Failed to alloc VNIC"); 966 goto ret; 967 } 968 969 /* RSS context is required only when there is more than one RSS ring */ 970 if (vnic->rx_queue_cnt > 1) { 971 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0); 972 if (rc) { 973 rte_flow_error_set(error, -rc, 974 RTE_FLOW_ERROR_TYPE_ACTION, 975 act, 976 "Failed to alloc VNIC context"); 977 goto ret; 978 } 979 } 980 981 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 982 vnic->vlan_strip = true; 983 else 984 vnic->vlan_strip = false; 985 986 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 987 if (rc) { 988 
rte_flow_error_set(error, -rc, 989 RTE_FLOW_ERROR_TYPE_ACTION, 990 act, 991 "Failed to configure VNIC"); 992 goto ret; 993 } 994 995 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 996 if (rc) { 997 rte_flow_error_set(error, -rc, 998 RTE_FLOW_ERROR_TYPE_ACTION, 999 act, 1000 "Failed to configure VNIC plcmode"); 1001 goto ret; 1002 } 1003 1004 bp->nr_vnics++; 1005 1006 return 0; 1007 1008 ret: 1009 bnxt_vnic_cleanup(bp, vnic); 1010 return rc; 1011 } 1012 1013 static int match_vnic_rss_cfg(struct bnxt *bp, 1014 struct bnxt_vnic_info *vnic, 1015 const struct rte_flow_action_rss *rss) 1016 { 1017 unsigned int match = 0, i; 1018 1019 if (vnic->rx_queue_cnt != rss->queue_num) 1020 return -EINVAL; 1021 1022 for (i = 0; i < rss->queue_num; i++) { 1023 if (!bp->rx_queues[rss->queue[i]]->vnic->rx_queue_cnt && 1024 !bp->rx_queues[rss->queue[i]]->rx_started) 1025 return -EINVAL; 1026 } 1027 1028 for (i = 0; i < vnic->rx_queue_cnt; i++) { 1029 int j; 1030 1031 for (j = 0; j < vnic->rx_queue_cnt; j++) { 1032 if (bp->grp_info[rss->queue[i]].fw_grp_id == 1033 vnic->fw_grp_ids[j]) 1034 match++; 1035 } 1036 } 1037 1038 if (match != vnic->rx_queue_cnt) { 1039 PMD_DRV_LOG(ERR, 1040 "VNIC queue count %d vs queues matched %d\n", 1041 match, vnic->rx_queue_cnt); 1042 return -EINVAL; 1043 } 1044 1045 return 0; 1046 } 1047 1048 static void 1049 bnxt_update_filter_flags_en(struct bnxt_filter_info *filter, 1050 struct bnxt_filter_info *filter1, 1051 int use_ntuple) 1052 { 1053 if (!use_ntuple && 1054 !(filter->valid_flags & 1055 ~(BNXT_FLOW_L2_DST_VALID_FLAG | 1056 BNXT_FLOW_L2_SRC_VALID_FLAG | 1057 BNXT_FLOW_L2_INNER_SRC_VALID_FLAG | 1058 BNXT_FLOW_L2_INNER_DST_VALID_FLAG | 1059 BNXT_FLOW_L2_DROP_FLAG | 1060 BNXT_FLOW_PARSE_INNER_FLAG))) { 1061 filter->flags = filter1->flags; 1062 filter->enables = filter1->enables; 1063 filter->filter_type = HWRM_CFA_L2_FILTER; 1064 memcpy(filter->l2_addr, filter1->l2_addr, RTE_ETHER_ADDR_LEN); 1065 memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN); 1066 filter->pri_hint = filter1->pri_hint; 1067 filter->l2_filter_id_hint = filter1->l2_filter_id_hint; 1068 } 1069 filter->fw_l2_filter_id = filter1->fw_l2_filter_id; 1070 filter->l2_ref_cnt = filter1->l2_ref_cnt; 1071 filter->flow_id = filter1->flow_id; 1072 PMD_DRV_LOG(DEBUG, 1073 "l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n", 1074 filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt); 1075 } 1076 1077 /* Valid actions supported along with RSS are count and mark. */ 1078 static int 1079 bnxt_validate_rss_action(const struct rte_flow_action actions[]) 1080 { 1081 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 1082 switch (actions->type) { 1083 case RTE_FLOW_ACTION_TYPE_VOID: 1084 break; 1085 case RTE_FLOW_ACTION_TYPE_RSS: 1086 break; 1087 default: 1088 return -ENOTSUP; 1089 } 1090 } 1091 1092 return 0; 1093 } 1094 1095 static int 1096 bnxt_get_vnic(struct bnxt *bp, uint32_t group) 1097 { 1098 int vnic_id = 0; 1099 1100 /* For legacy NS3 based implementations, 1101 * group_id will be mapped to a VNIC ID. 1102 */ 1103 if (BNXT_STINGRAY(bp)) 1104 vnic_id = group; 1105 1106 /* Non NS3 cases, group_id will be ignored. 1107 * Setting will be configured on default VNIC. 
1108 */ 1109 return vnic_id; 1110 } 1111 1112 static int 1113 bnxt_vnic_rss_cfg_update(struct bnxt *bp, 1114 struct bnxt_vnic_info *vnic, 1115 const struct rte_flow_action *act, 1116 struct rte_flow_error *error) 1117 { 1118 const struct rte_flow_action_rss *rss; 1119 unsigned int rss_idx, i; 1120 uint16_t hash_type; 1121 uint64_t types; 1122 int rc; 1123 1124 rss = (const struct rte_flow_action_rss *)act->conf; 1125 1126 /* Currently only Toeplitz hash is supported. */ 1127 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT && 1128 rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) { 1129 rte_flow_error_set(error, 1130 ENOTSUP, 1131 RTE_FLOW_ERROR_TYPE_ACTION, 1132 act, 1133 "Unsupported RSS hash function"); 1134 rc = -rte_errno; 1135 goto ret; 1136 } 1137 1138 /* key_len should match the hash key supported by hardware */ 1139 if (rss->key_len != 0 && rss->key_len != HW_HASH_KEY_SIZE) { 1140 rte_flow_error_set(error, 1141 EINVAL, 1142 RTE_FLOW_ERROR_TYPE_ACTION, 1143 act, 1144 "Incorrect hash key parameters"); 1145 rc = -rte_errno; 1146 goto ret; 1147 } 1148 1149 /* Currently RSS hash on inner and outer headers are supported. 1150 * 0 => Default (innermost RSS) setting 1151 * 1 => Outermost 1152 */ 1153 if (rss->level > 1) { 1154 rte_flow_error_set(error, 1155 ENOTSUP, 1156 RTE_FLOW_ERROR_TYPE_ACTION, 1157 act, 1158 "Unsupported hash level"); 1159 rc = -rte_errno; 1160 goto ret; 1161 } 1162 1163 if ((rss->queue_num == 0 && rss->queue != NULL) || 1164 (rss->queue_num != 0 && rss->queue == NULL)) { 1165 rte_flow_error_set(error, 1166 EINVAL, 1167 RTE_FLOW_ERROR_TYPE_ACTION, 1168 act, 1169 "Invalid queue config specified"); 1170 rc = -rte_errno; 1171 goto ret; 1172 } 1173 1174 /* If RSS types is 0, use a best effort configuration */ 1175 types = rss->types ? rss->types : RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6; 1176 1177 hash_type = bnxt_rte_to_hwrm_hash_types(types); 1178 1179 /* If requested types can't be supported, leave existing settings */ 1180 if (hash_type) 1181 vnic->hash_type = hash_type; 1182 1183 vnic->hash_mode = 1184 bnxt_rte_to_hwrm_hash_level(bp, rss->types, rss->level); 1185 1186 /* Update RSS key only if key_len != 0 */ 1187 if (rss->key_len != 0) 1188 memcpy(vnic->rss_hash_key, rss->key, rss->key_len); 1189 1190 if (rss->queue_num == 0) 1191 goto skip_rss_table; 1192 1193 /* Validate Rx queues */ 1194 for (i = 0; i < rss->queue_num; i++) { 1195 PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n", rss->queue[i]); 1196 1197 if (rss->queue[i] >= bp->rx_nr_rings || 1198 !bp->rx_queues[rss->queue[i]]) { 1199 rte_flow_error_set(error, 1200 EINVAL, 1201 RTE_FLOW_ERROR_TYPE_ACTION, 1202 act, 1203 "Invalid queue ID for RSS"); 1204 rc = -rte_errno; 1205 goto ret; 1206 } 1207 } 1208 1209 /* Prepare the indirection table */ 1210 for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; rss_idx++) { 1211 struct bnxt_rx_queue *rxq; 1212 uint32_t idx; 1213 1214 idx = rss->queue[rss_idx % rss->queue_num]; 1215 1216 if (BNXT_CHIP_P5(bp)) { 1217 rxq = bp->rx_queues[idx]; 1218 vnic->rss_table[rss_idx * 2] = 1219 rxq->rx_ring->rx_ring_struct->fw_ring_id; 1220 vnic->rss_table[rss_idx * 2 + 1] = 1221 rxq->cp_ring->cp_ring_struct->fw_ring_id; 1222 } else { 1223 vnic->rss_table[rss_idx] = vnic->fw_grp_ids[idx]; 1224 } 1225 } 1226 1227 skip_rss_table: 1228 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 1229 if (rc != 0) { 1230 rte_flow_error_set(error, 1231 -rc, 1232 RTE_FLOW_ERROR_TYPE_ACTION, 1233 act, 1234 "VNIC RSS configure failed"); 1235 rc = -rte_errno; 1236 goto ret; 1237 } 1238 ret: 1239 return rc; 1240 } 1241 1242 static int 
1243 bnxt_validate_and_parse_flow(struct rte_eth_dev *dev, 1244 const struct rte_flow_item pattern[], 1245 const struct rte_flow_action actions[], 1246 const struct rte_flow_attr *attr, 1247 struct rte_flow_error *error, 1248 struct bnxt_filter_info *filter) 1249 { 1250 const struct rte_flow_action *act = 1251 bnxt_flow_non_void_action(actions); 1252 struct bnxt *bp = dev->data->dev_private; 1253 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 1254 struct bnxt_vnic_info *vnic = NULL, *vnic0 = NULL; 1255 const struct rte_flow_action_queue *act_q; 1256 const struct rte_flow_action_vf *act_vf; 1257 struct bnxt_filter_info *filter1 = NULL; 1258 const struct rte_flow_action_rss *rss; 1259 struct bnxt_rx_queue *rxq = NULL; 1260 int dflt_vnic, vnic_id; 1261 unsigned int rss_idx; 1262 uint32_t vf = 0, i; 1263 int rc, use_ntuple; 1264 1265 rc = 1266 bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter); 1267 if (rc != 0) 1268 goto ret; 1269 1270 rc = bnxt_flow_parse_attr(attr, error); 1271 if (rc != 0) 1272 goto ret; 1273 1274 /* Since we support ingress attribute only - right now. */ 1275 if (filter->filter_type == HWRM_CFA_EM_FILTER) 1276 filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX; 1277 1278 use_ntuple = bnxt_filter_type_check(pattern, error); 1279 1280 start: 1281 switch (act->type) { 1282 case RTE_FLOW_ACTION_TYPE_QUEUE: 1283 /* Allow this flow. Redirect to a VNIC. */ 1284 act_q = (const struct rte_flow_action_queue *)act->conf; 1285 if (!act_q->index || act_q->index >= bp->rx_nr_rings) { 1286 rte_flow_error_set(error, 1287 EINVAL, 1288 RTE_FLOW_ERROR_TYPE_ACTION, 1289 act, 1290 "Invalid queue ID."); 1291 rc = -rte_errno; 1292 goto ret; 1293 } 1294 PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index); 1295 1296 vnic_id = attr->group; 1297 if (!vnic_id) { 1298 PMD_DRV_LOG(DEBUG, "Group id is 0\n"); 1299 vnic_id = act_q->index; 1300 } 1301 1302 BNXT_VALID_VNIC_OR_RET(bp, vnic_id); 1303 1304 vnic = &bp->vnic_info[vnic_id]; 1305 if (vnic->rx_queue_cnt) { 1306 if (vnic->start_grp_id != act_q->index) { 1307 PMD_DRV_LOG(ERR, 1308 "VNIC already in use\n"); 1309 rte_flow_error_set(error, 1310 EINVAL, 1311 RTE_FLOW_ERROR_TYPE_ACTION, 1312 act, 1313 "VNIC already in use"); 1314 rc = -rte_errno; 1315 goto ret; 1316 } 1317 goto use_vnic; 1318 } 1319 1320 rxq = bp->rx_queues[act_q->index]; 1321 1322 if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) && rxq && 1323 vnic->fw_vnic_id != INVALID_HW_RING_ID) 1324 goto use_vnic; 1325 1326 if (!rxq) { 1327 PMD_DRV_LOG(ERR, 1328 "Queue invalid or used with other VNIC\n"); 1329 rte_flow_error_set(error, 1330 EINVAL, 1331 RTE_FLOW_ERROR_TYPE_ACTION, 1332 act, 1333 "Queue invalid queue or in use"); 1334 rc = -rte_errno; 1335 goto ret; 1336 } 1337 1338 rxq->vnic = vnic; 1339 rxq->rx_started = 1; 1340 vnic->rx_queue_cnt++; 1341 vnic->start_grp_id = act_q->index; 1342 vnic->end_grp_id = act_q->index; 1343 vnic->func_default = 0; //This is not a default VNIC. 1344 1345 PMD_DRV_LOG(DEBUG, "VNIC found\n"); 1346 1347 rc = bnxt_vnic_prep(bp, vnic, act, error); 1348 if (rc) 1349 goto ret; 1350 1351 PMD_DRV_LOG(DEBUG, 1352 "vnic[%d] = %p vnic->fw_grp_ids = %p\n", 1353 act_q->index, vnic, vnic->fw_grp_ids); 1354 1355 use_vnic: 1356 vnic->ff_pool_idx = vnic_id; 1357 PMD_DRV_LOG(DEBUG, 1358 "Setting vnic ff_idx %d\n", vnic->ff_pool_idx); 1359 filter->dst_id = vnic->fw_vnic_id; 1360 1361 /* For ntuple filter, create the L2 filter with default VNIC. 
1362 * The user specified redirect queue will be set while creating 1363 * the ntuple filter in hardware. 1364 */ 1365 vnic0 = BNXT_GET_DEFAULT_VNIC(bp); 1366 if (use_ntuple) 1367 filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 1368 else 1369 filter1 = bnxt_get_l2_filter(bp, filter, vnic); 1370 if (filter1 == NULL) { 1371 rte_flow_error_set(error, 1372 ENOSPC, 1373 RTE_FLOW_ERROR_TYPE_ACTION, 1374 act, 1375 "Filter not available"); 1376 rc = -rte_errno; 1377 goto ret; 1378 } 1379 1380 PMD_DRV_LOG(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d\n", 1381 filter, filter1, filter1->l2_ref_cnt); 1382 bnxt_update_filter_flags_en(filter, filter1, use_ntuple); 1383 break; 1384 case RTE_FLOW_ACTION_TYPE_DROP: 1385 vnic0 = &bp->vnic_info[0]; 1386 filter->dst_id = vnic0->fw_vnic_id; 1387 filter->valid_flags |= BNXT_FLOW_L2_DROP_FLAG; 1388 filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 1389 if (filter1 == NULL) { 1390 rte_flow_error_set(error, 1391 ENOSPC, 1392 RTE_FLOW_ERROR_TYPE_ACTION, 1393 act, 1394 "Filter not available"); 1395 rc = -rte_errno; 1396 goto ret; 1397 } 1398 1399 if (filter->filter_type == HWRM_CFA_EM_FILTER) 1400 filter->flags = 1401 HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP; 1402 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) 1403 filter->flags = 1404 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; 1405 1406 bnxt_update_filter_flags_en(filter, filter1, use_ntuple); 1407 break; 1408 case RTE_FLOW_ACTION_TYPE_COUNT: 1409 vnic0 = &bp->vnic_info[0]; 1410 filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 1411 if (filter1 == NULL) { 1412 rte_flow_error_set(error, 1413 ENOSPC, 1414 RTE_FLOW_ERROR_TYPE_ACTION, 1415 act, 1416 "New filter not available"); 1417 rc = -rte_errno; 1418 goto ret; 1419 } 1420 1421 filter->fw_l2_filter_id = filter1->fw_l2_filter_id; 1422 filter->flow_id = filter1->flow_id; 1423 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER; 1424 break; 1425 case RTE_FLOW_ACTION_TYPE_VF: 1426 act_vf = (const struct rte_flow_action_vf *)act->conf; 1427 vf = act_vf->id; 1428 1429 if (filter->tunnel_type == 1430 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN || 1431 filter->tunnel_type == 1432 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) { 1433 /* If issued on a VF, ensure id is 0 and is trusted */ 1434 if (BNXT_VF(bp)) { 1435 if (!BNXT_VF_IS_TRUSTED(bp) || vf) { 1436 rte_flow_error_set(error, EINVAL, 1437 RTE_FLOW_ERROR_TYPE_ACTION, 1438 act, 1439 "Incorrect VF"); 1440 rc = -rte_errno; 1441 goto ret; 1442 } 1443 } 1444 1445 filter->enables |= filter->tunnel_type; 1446 filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER; 1447 goto done; 1448 } 1449 1450 if (vf >= bp->pdev->max_vfs) { 1451 rte_flow_error_set(error, 1452 EINVAL, 1453 RTE_FLOW_ERROR_TYPE_ACTION, 1454 act, 1455 "Incorrect VF id!"); 1456 rc = -rte_errno; 1457 goto ret; 1458 } 1459 1460 filter->mirror_vnic_id = 1461 dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf); 1462 if (dflt_vnic < 0) { 1463 /* This simply indicates there's no driver loaded. 1464 * This is not an error. 
1465 */ 1466 rte_flow_error_set(error, 1467 EINVAL, 1468 RTE_FLOW_ERROR_TYPE_ACTION, 1469 act, 1470 "Unable to get default VNIC for VF"); 1471 rc = -rte_errno; 1472 goto ret; 1473 } 1474 1475 filter->mirror_vnic_id = dflt_vnic; 1476 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID; 1477 1478 vnic0 = &bp->vnic_info[0]; 1479 filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 1480 if (filter1 == NULL) { 1481 rte_flow_error_set(error, 1482 ENOSPC, 1483 RTE_FLOW_ERROR_TYPE_ACTION, 1484 act, 1485 "New filter not available"); 1486 rc = -rte_errno; 1487 goto ret; 1488 } 1489 1490 filter->fw_l2_filter_id = filter1->fw_l2_filter_id; 1491 filter->flow_id = filter1->flow_id; 1492 break; 1493 case RTE_FLOW_ACTION_TYPE_RSS: 1494 rc = bnxt_validate_rss_action(actions); 1495 if (rc != 0) { 1496 rte_flow_error_set(error, 1497 EINVAL, 1498 RTE_FLOW_ERROR_TYPE_ACTION, 1499 act, 1500 "Invalid actions specified with RSS"); 1501 rc = -rte_errno; 1502 goto ret; 1503 } 1504 1505 rss = (const struct rte_flow_action_rss *)act->conf; 1506 1507 vnic_id = bnxt_get_vnic(bp, attr->group); 1508 1509 BNXT_VALID_VNIC_OR_RET(bp, vnic_id); 1510 vnic = &bp->vnic_info[vnic_id]; 1511 1512 /* 1513 * For non NS3 cases, rte_flow_items will not be considered 1514 * for RSS updates. 1515 */ 1516 if (filter->filter_type == HWRM_CFA_CONFIG) { 1517 /* RSS config update requested */ 1518 rc = bnxt_vnic_rss_cfg_update(bp, vnic, act, error); 1519 if (rc != 0) 1520 goto ret; 1521 1522 filter->dst_id = vnic->fw_vnic_id; 1523 break; 1524 } 1525 1526 /* Check if requested RSS config matches RSS config of VNIC 1527 * only if it is not a fresh VNIC configuration. 1528 * Otherwise the existing VNIC configuration can be used. 1529 */ 1530 if (vnic->rx_queue_cnt) { 1531 rc = match_vnic_rss_cfg(bp, vnic, rss); 1532 if (rc) { 1533 PMD_DRV_LOG(ERR, 1534 "VNIC and RSS config mismatch\n"); 1535 rte_flow_error_set(error, 1536 EINVAL, 1537 RTE_FLOW_ERROR_TYPE_ACTION, 1538 act, 1539 "VNIC and RSS cfg mismatch"); 1540 rc = -rte_errno; 1541 goto ret; 1542 } 1543 goto vnic_found; 1544 } 1545 1546 for (i = 0; i < rss->queue_num; i++) { 1547 PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n", 1548 rss->queue[i]); 1549 1550 if (!rss->queue[i] || 1551 rss->queue[i] >= bp->rx_nr_rings || 1552 !bp->rx_queues[rss->queue[i]]) { 1553 rte_flow_error_set(error, 1554 EINVAL, 1555 RTE_FLOW_ERROR_TYPE_ACTION, 1556 act, 1557 "Invalid queue ID for RSS"); 1558 rc = -rte_errno; 1559 goto ret; 1560 } 1561 rxq = bp->rx_queues[rss->queue[i]]; 1562 1563 if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] != 1564 INVALID_HW_RING_ID) { 1565 PMD_DRV_LOG(ERR, 1566 "queue active with other VNIC\n"); 1567 rte_flow_error_set(error, 1568 EINVAL, 1569 RTE_FLOW_ERROR_TYPE_ACTION, 1570 act, 1571 "Invalid queue ID for RSS"); 1572 rc = -rte_errno; 1573 goto ret; 1574 } 1575 1576 rxq->vnic = vnic; 1577 rxq->rx_started = 1; 1578 vnic->rx_queue_cnt++; 1579 } 1580 1581 vnic->start_grp_id = rss->queue[0]; 1582 vnic->end_grp_id = rss->queue[rss->queue_num - 1]; 1583 vnic->func_default = 0; //This is not a default VNIC. 1584 1585 rc = bnxt_vnic_prep(bp, vnic, act, error); 1586 if (rc) 1587 goto ret; 1588 1589 PMD_DRV_LOG(DEBUG, 1590 "vnic[%d] = %p vnic->fw_grp_ids = %p\n", 1591 vnic_id, vnic, vnic->fw_grp_ids); 1592 1593 vnic->ff_pool_idx = vnic_id; 1594 PMD_DRV_LOG(DEBUG, 1595 "Setting vnic ff_pool_idx %d\n", vnic->ff_pool_idx); 1596 1597 /* This can be done only after vnic_grp_alloc is done. 
*/ 1598 for (i = 0; i < vnic->rx_queue_cnt; i++) { 1599 vnic->fw_grp_ids[i] = 1600 bp->grp_info[rss->queue[i]].fw_grp_id; 1601 /* Make sure vnic0 does not use these rings. */ 1602 bp->vnic_info[0].fw_grp_ids[rss->queue[i]] = 1603 INVALID_HW_RING_ID; 1604 } 1605 1606 for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; ) { 1607 for (i = 0; i < vnic->rx_queue_cnt; i++) 1608 vnic->rss_table[rss_idx++] = 1609 vnic->fw_grp_ids[i]; 1610 } 1611 1612 /* Configure RSS only if the queue count is > 1 */ 1613 if (vnic->rx_queue_cnt > 1) { 1614 vnic->hash_type = 1615 bnxt_rte_to_hwrm_hash_types(rss->types); 1616 vnic->hash_mode = 1617 bnxt_rte_to_hwrm_hash_level(bp, rss->types, rss->level); 1618 1619 if (!rss->key_len) { 1620 /* If hash key has not been specified, 1621 * use random hash key. 1622 */ 1623 bnxt_prandom_bytes(vnic->rss_hash_key, 1624 HW_HASH_KEY_SIZE); 1625 } else { 1626 if (rss->key_len > HW_HASH_KEY_SIZE) 1627 memcpy(vnic->rss_hash_key, 1628 rss->key, 1629 HW_HASH_KEY_SIZE); 1630 else 1631 memcpy(vnic->rss_hash_key, 1632 rss->key, 1633 rss->key_len); 1634 } 1635 bnxt_hwrm_vnic_rss_cfg(bp, vnic); 1636 } else { 1637 PMD_DRV_LOG(DEBUG, "No RSS config required\n"); 1638 } 1639 1640 vnic_found: 1641 filter->dst_id = vnic->fw_vnic_id; 1642 filter1 = bnxt_get_l2_filter(bp, filter, vnic); 1643 if (filter1 == NULL) { 1644 rte_flow_error_set(error, 1645 ENOSPC, 1646 RTE_FLOW_ERROR_TYPE_ACTION, 1647 act, 1648 "New filter not available"); 1649 rc = -rte_errno; 1650 goto ret; 1651 } 1652 1653 PMD_DRV_LOG(DEBUG, "L2 filter created\n"); 1654 bnxt_update_filter_flags_en(filter, filter1, use_ntuple); 1655 break; 1656 case RTE_FLOW_ACTION_TYPE_MARK: 1657 if (bp->mark_table == NULL) { 1658 rte_flow_error_set(error, 1659 ENOMEM, 1660 RTE_FLOW_ERROR_TYPE_ACTION, 1661 act, 1662 "Mark table not allocated."); 1663 rc = -rte_errno; 1664 goto ret; 1665 } 1666 1667 if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) { 1668 PMD_DRV_LOG(DEBUG, 1669 "Disabling vector processing for mark\n"); 1670 bp->eth_dev->rx_pkt_burst = bnxt_recv_pkts; 1671 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1672 } 1673 1674 filter->valid_flags |= BNXT_FLOW_MARK_FLAG; 1675 filter->mark = ((const struct rte_flow_action_mark *) 1676 act->conf)->id; 1677 PMD_DRV_LOG(DEBUG, "Mark the flow %d\n", filter->mark); 1678 break; 1679 default: 1680 rte_flow_error_set(error, 1681 EINVAL, 1682 RTE_FLOW_ERROR_TYPE_ACTION, 1683 act, 1684 "Invalid action."); 1685 rc = -rte_errno; 1686 goto ret; 1687 } 1688 1689 done: 1690 act = bnxt_flow_non_void_action(++act); 1691 while (act->type != RTE_FLOW_ACTION_TYPE_END) 1692 goto start; 1693 1694 return rc; 1695 ret: 1696 1697 if (filter1) { 1698 bnxt_hwrm_clear_l2_filter(bp, filter1); 1699 bnxt_free_filter(bp, filter1); 1700 } 1701 1702 if (rte_errno) { 1703 if (vnic && STAILQ_EMPTY(&vnic->filter)) 1704 vnic->rx_queue_cnt = 0; 1705 1706 if (rxq && !vnic->rx_queue_cnt) 1707 rxq->vnic = &bp->vnic_info[0]; 1708 } 1709 return -rte_errno; 1710 } 1711 1712 static 1713 struct bnxt_vnic_info *find_matching_vnic(struct bnxt *bp, 1714 struct bnxt_filter_info *filter) 1715 { 1716 struct bnxt_vnic_info *vnic = NULL; 1717 unsigned int i; 1718 1719 for (i = 0; i < bp->max_vnics; i++) { 1720 vnic = &bp->vnic_info[i]; 1721 if (vnic->fw_vnic_id != INVALID_VNIC_ID && 1722 filter->dst_id == vnic->fw_vnic_id) { 1723 PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n", 1724 vnic->ff_pool_idx); 1725 return vnic; 1726 } 1727 } 1728 return NULL; 1729 } 1730 1731 static int 1732 bnxt_flow_validate(struct rte_eth_dev *dev, 1733 const struct 
rte_flow_attr *attr, 1734 const struct rte_flow_item pattern[], 1735 const struct rte_flow_action actions[], 1736 struct rte_flow_error *error) 1737 { 1738 struct bnxt *bp = dev->data->dev_private; 1739 struct bnxt_vnic_info *vnic = NULL; 1740 struct bnxt_filter_info *filter; 1741 int ret = 0; 1742 1743 bnxt_acquire_flow_lock(bp); 1744 ret = bnxt_flow_args_validate(attr, pattern, actions, error); 1745 if (ret != 0) { 1746 bnxt_release_flow_lock(bp); 1747 return ret; 1748 } 1749 1750 filter = bnxt_get_unused_filter(bp); 1751 if (filter == NULL) { 1752 rte_flow_error_set(error, ENOSPC, 1753 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 1754 "Not enough resources for a new flow"); 1755 bnxt_release_flow_lock(bp); 1756 return -ENOSPC; 1757 } 1758 1759 ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, 1760 error, filter); 1761 if (ret) 1762 goto exit; 1763 1764 vnic = find_matching_vnic(bp, filter); 1765 if (vnic) { 1766 if (STAILQ_EMPTY(&vnic->filter)) { 1767 bnxt_vnic_cleanup(bp, vnic); 1768 bp->nr_vnics--; 1769 PMD_DRV_LOG(DEBUG, "Free VNIC\n"); 1770 } 1771 } 1772 1773 if (filter->filter_type == HWRM_CFA_EM_FILTER) 1774 bnxt_hwrm_clear_em_filter(bp, filter); 1775 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) 1776 bnxt_hwrm_clear_ntuple_filter(bp, filter); 1777 else 1778 bnxt_hwrm_clear_l2_filter(bp, filter); 1779 1780 exit: 1781 /* No need to hold on to this filter if we are just validating flow */ 1782 bnxt_free_filter(bp, filter); 1783 bnxt_release_flow_lock(bp); 1784 1785 return ret; 1786 } 1787 1788 static void 1789 bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter, 1790 struct bnxt_filter_info *new_filter) 1791 { 1792 /* Clear the new L2 filter that was created in the previous step in 1793 * bnxt_validate_and_parse_flow. For L2 filters, we will use the new 1794 * filter which points to the new destination queue and so we clear 1795 * the previous L2 filter. For ntuple filters, we are going to reuse 1796 * the old L2 filter and create new NTUPLE filter with this new 1797 * destination queue subsequently during bnxt_flow_create. So we 1798 * decrement the ref cnt of the L2 filter that would've been bumped 1799 * up previously in bnxt_validate_and_parse_flow as the old n-tuple 1800 * filter that was referencing it will be deleted now. 
1801 */ 1802 bnxt_hwrm_clear_l2_filter(bp, old_filter); 1803 if (new_filter->filter_type == HWRM_CFA_L2_FILTER) { 1804 bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter); 1805 } else { 1806 if (new_filter->filter_type == HWRM_CFA_EM_FILTER) 1807 bnxt_hwrm_clear_em_filter(bp, old_filter); 1808 if (new_filter->filter_type == HWRM_CFA_NTUPLE_FILTER) 1809 bnxt_hwrm_clear_ntuple_filter(bp, old_filter); 1810 } 1811 } 1812 1813 static int 1814 bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf) 1815 { 1816 struct bnxt_filter_info *mf; 1817 struct rte_flow *flow; 1818 int i; 1819 1820 for (i = bp->max_vnics - 1; i >= 0; i--) { 1821 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 1822 1823 if (vnic->fw_vnic_id == INVALID_VNIC_ID) 1824 continue; 1825 1826 STAILQ_FOREACH(flow, &vnic->flow_list, next) { 1827 mf = flow->filter; 1828 1829 if (mf->filter_type == nf->filter_type && 1830 mf->flags == nf->flags && 1831 mf->src_port == nf->src_port && 1832 mf->src_port_mask == nf->src_port_mask && 1833 mf->dst_port == nf->dst_port && 1834 mf->dst_port_mask == nf->dst_port_mask && 1835 mf->ip_protocol == nf->ip_protocol && 1836 mf->ip_addr_type == nf->ip_addr_type && 1837 mf->ethertype == nf->ethertype && 1838 mf->vni == nf->vni && 1839 mf->tunnel_type == nf->tunnel_type && 1840 mf->l2_ovlan == nf->l2_ovlan && 1841 mf->l2_ovlan_mask == nf->l2_ovlan_mask && 1842 mf->l2_ivlan == nf->l2_ivlan && 1843 mf->l2_ivlan_mask == nf->l2_ivlan_mask && 1844 !memcmp(mf->l2_addr, nf->l2_addr, 1845 RTE_ETHER_ADDR_LEN) && 1846 !memcmp(mf->l2_addr_mask, nf->l2_addr_mask, 1847 RTE_ETHER_ADDR_LEN) && 1848 !memcmp(mf->src_macaddr, nf->src_macaddr, 1849 RTE_ETHER_ADDR_LEN) && 1850 !memcmp(mf->dst_macaddr, nf->dst_macaddr, 1851 RTE_ETHER_ADDR_LEN) && 1852 !memcmp(mf->src_ipaddr, nf->src_ipaddr, 1853 sizeof(nf->src_ipaddr)) && 1854 !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask, 1855 sizeof(nf->src_ipaddr_mask)) && 1856 !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, 1857 sizeof(nf->dst_ipaddr)) && 1858 !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, 1859 sizeof(nf->dst_ipaddr_mask))) { 1860 if (mf->dst_id == nf->dst_id) 1861 return -EEXIST; 1862 /* Free the old filter, update flow 1863 * with new filter 1864 */ 1865 bnxt_update_filter(bp, mf, nf); 1866 STAILQ_REMOVE(&vnic->filter, mf, 1867 bnxt_filter_info, next); 1868 STAILQ_INSERT_TAIL(&vnic->filter, nf, next); 1869 bnxt_free_filter(bp, mf); 1870 flow->filter = nf; 1871 return -EXDEV; 1872 } 1873 } 1874 } 1875 return 0; 1876 } 1877 1878 static void 1879 bnxt_setup_flow_counter(struct bnxt *bp) 1880 { 1881 if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS && 1882 !(bp->flags & BNXT_FLAG_FC_THREAD) && BNXT_FLOW_XSTATS_EN(bp)) { 1883 rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER, 1884 bnxt_flow_cnt_alarm_cb, 1885 (void *)bp); 1886 bp->flags |= BNXT_FLAG_FC_THREAD; 1887 } 1888 } 1889 1890 void bnxt_flow_cnt_alarm_cb(void *arg) 1891 { 1892 int rc = 0; 1893 struct bnxt *bp = arg; 1894 1895 if (!bp->flow_stat->rx_fc_out_tbl.va) { 1896 PMD_DRV_LOG(ERR, "bp->flow_stat->rx_fc_out_tbl.va is NULL?\n"); 1897 bnxt_cancel_fc_thread(bp); 1898 return; 1899 } 1900 1901 if (!bp->flow_stat->flow_count) { 1902 bnxt_cancel_fc_thread(bp); 1903 return; 1904 } 1905 1906 if (!bp->eth_dev->data->dev_started) { 1907 bnxt_cancel_fc_thread(bp); 1908 return; 1909 } 1910 1911 rc = bnxt_flow_stats_req(bp); 1912 if (rc) { 1913 PMD_DRV_LOG(ERR, "Flow stat alarm not rescheduled.\n"); 1914 return; 1915 } 1916 1917 rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER, 1918 bnxt_flow_cnt_alarm_cb, 1919 (void *)bp); 
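	/* Re-armed above: flow counters are polled every BNXT_FC_TIMER
	 * seconds until no flows remain, the port is stopped, or
	 * bnxt_cancel_fc_thread() cancels the alarm.
	 */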
1920 } 1921 1922 1923 static struct rte_flow * 1924 bnxt_flow_create(struct rte_eth_dev *dev, 1925 const struct rte_flow_attr *attr, 1926 const struct rte_flow_item pattern[], 1927 const struct rte_flow_action actions[], 1928 struct rte_flow_error *error) 1929 { 1930 struct bnxt *bp = dev->data->dev_private; 1931 struct bnxt_vnic_info *vnic = NULL; 1932 struct bnxt_filter_info *filter; 1933 bool update_flow = false; 1934 struct rte_flow *flow; 1935 int ret = 0; 1936 uint32_t tun_type, flow_id; 1937 1938 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { 1939 rte_flow_error_set(error, EINVAL, 1940 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 1941 "Failed to create flow, Not a Trusted VF!"); 1942 return NULL; 1943 } 1944 1945 if (!dev->data->dev_started) { 1946 rte_flow_error_set(error, 1947 EINVAL, 1948 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1949 NULL, 1950 "Device must be started"); 1951 return NULL; 1952 } 1953 1954 flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0); 1955 if (!flow) { 1956 rte_flow_error_set(error, ENOMEM, 1957 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 1958 "Failed to allocate memory"); 1959 return flow; 1960 } 1961 1962 bnxt_acquire_flow_lock(bp); 1963 ret = bnxt_flow_args_validate(attr, pattern, actions, error); 1964 if (ret != 0) { 1965 PMD_DRV_LOG(ERR, "Not a validate flow.\n"); 1966 goto free_flow; 1967 } 1968 1969 filter = bnxt_get_unused_filter(bp); 1970 if (filter == NULL) { 1971 rte_flow_error_set(error, ENOSPC, 1972 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 1973 "Not enough resources for a new flow"); 1974 goto free_flow; 1975 } 1976 1977 ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, 1978 error, filter); 1979 if (ret != 0) 1980 goto free_filter; 1981 1982 ret = bnxt_match_filter(bp, filter); 1983 if (ret == -EEXIST) { 1984 PMD_DRV_LOG(DEBUG, "Flow already exists.\n"); 1985 /* Clear the filter that was created as part of 1986 * validate_and_parse_flow() above 1987 */ 1988 bnxt_hwrm_clear_l2_filter(bp, filter); 1989 goto free_filter; 1990 } else if (ret == -EXDEV) { 1991 PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n"); 1992 PMD_DRV_LOG(DEBUG, "Updating with different destination\n"); 1993 update_flow = true; 1994 } 1995 1996 /* If tunnel redirection to a VF/PF is specified then only tunnel_type 1997 * is set and enable is set to the tunnel type. Issue hwrm cmd directly 1998 * in such a case. 
1999 */ 2000 if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER && 2001 filter->enables == filter->tunnel_type) { 2002 ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type); 2003 if (ret) { 2004 rte_flow_error_set(error, -ret, 2005 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2006 "Unable to query tunnel to VF"); 2007 goto free_filter; 2008 } 2009 if (tun_type == (1U << filter->tunnel_type)) { 2010 ret = 2011 bnxt_hwrm_tunnel_redirect_free(bp, 2012 filter->tunnel_type); 2013 if (ret) { 2014 PMD_DRV_LOG(ERR, 2015 "Unable to free existing tunnel\n"); 2016 rte_flow_error_set(error, -ret, 2017 RTE_FLOW_ERROR_TYPE_HANDLE, 2018 NULL, 2019 "Unable to free preexisting " 2020 "tunnel on VF"); 2021 goto free_filter; 2022 } 2023 } 2024 ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type); 2025 if (ret) { 2026 rte_flow_error_set(error, -ret, 2027 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2028 "Unable to redirect tunnel to VF"); 2029 goto free_filter; 2030 } 2031 vnic = &bp->vnic_info[0]; 2032 goto done; 2033 } 2034 2035 if (filter->filter_type == HWRM_CFA_EM_FILTER) { 2036 filter->enables |= 2037 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2038 ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter); 2039 if (ret != 0) { 2040 rte_flow_error_set(error, -ret, 2041 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2042 "Failed to create EM filter"); 2043 goto free_filter; 2044 } 2045 } 2046 2047 if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) { 2048 filter->enables |= 2049 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2050 ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter); 2051 if (ret != 0) { 2052 rte_flow_error_set(error, -ret, 2053 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2054 "Failed to create ntuple filter"); 2055 goto free_filter; 2056 } 2057 } 2058 2059 vnic = find_matching_vnic(bp, filter); 2060 done: 2061 if (!ret || update_flow) { 2062 flow->filter = filter; 2063 flow->vnic = vnic; 2064 if (update_flow) { 2065 ret = -EXDEV; 2066 goto free_flow; 2067 } 2068 2069 if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) { 2070 PMD_DRV_LOG(DEBUG, 2071 "Mark action: mark id 0x%x, flow id 0x%x\n", 2072 filter->mark, filter->flow_id); 2073 2074 /* TCAM and EM should be 16-bit only. 2075 * Other modes not supported. 
2076 */ 2077 flow_id = filter->flow_id & BNXT_FLOW_ID_MASK; 2078 if (bp->mark_table[flow_id].valid) { 2079 rte_flow_error_set(error, EEXIST, 2080 RTE_FLOW_ERROR_TYPE_HANDLE, 2081 NULL, 2082 "Flow with mark id exists"); 2083 bnxt_clear_one_vnic_filter(bp, filter); 2084 goto free_filter; 2085 } 2086 bp->mark_table[flow_id].valid = true; 2087 bp->mark_table[flow_id].mark_id = filter->mark; 2088 } 2089 2090 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2091 STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next); 2092 2093 if (BNXT_FLOW_XSTATS_EN(bp)) 2094 bp->flow_stat->flow_count++; 2095 bnxt_release_flow_lock(bp); 2096 bnxt_setup_flow_counter(bp); 2097 PMD_DRV_LOG(DEBUG, "Successfully created flow.\n"); 2098 return flow; 2099 } 2100 2101 free_filter: 2102 bnxt_free_filter(bp, filter); 2103 free_flow: 2104 if (ret == -EEXIST) 2105 rte_flow_error_set(error, ret, 2106 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2107 "Matching Flow exists."); 2108 else if (ret == -EXDEV) 2109 rte_flow_error_set(error, 0, 2110 RTE_FLOW_ERROR_TYPE_NONE, NULL, 2111 "Flow with pattern exists, updating destination queue"); 2112 else if (!rte_errno) 2113 rte_flow_error_set(error, -ret, 2114 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2115 "Failed to create flow."); 2116 rte_free(flow); 2117 flow = NULL; 2118 bnxt_release_flow_lock(bp); 2119 return flow; 2120 } 2121 2122 static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp, 2123 struct bnxt_filter_info *filter, 2124 struct rte_flow_error *error) 2125 { 2126 uint16_t tun_dst_fid; 2127 uint32_t tun_type; 2128 int ret = 0; 2129 2130 ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type); 2131 if (ret) { 2132 rte_flow_error_set(error, -ret, 2133 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2134 "Unable to query tunnel to VF"); 2135 return ret; 2136 } 2137 if (tun_type == (1U << filter->tunnel_type)) { 2138 ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type, 2139 &tun_dst_fid); 2140 if (ret) { 2141 rte_flow_error_set(error, -ret, 2142 RTE_FLOW_ERROR_TYPE_HANDLE, 2143 NULL, 2144 "tunnel_redirect info cmd fail"); 2145 return ret; 2146 } 2147 PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n", 2148 tun_dst_fid + bp->first_vf_id, bp->fw_fid); 2149 2150 /* Tunnel doesn't belong to this VF, so don't send HWRM 2151 * cmd, just delete the flow from driver 2152 */ 2153 if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id)) { 2154 PMD_DRV_LOG(ERR, 2155 "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n"); 2156 } else { 2157 ret = bnxt_hwrm_tunnel_redirect_free(bp, 2158 filter->tunnel_type); 2159 if (ret) { 2160 rte_flow_error_set(error, -ret, 2161 RTE_FLOW_ERROR_TYPE_HANDLE, 2162 NULL, 2163 "Unable to free tunnel redirection"); 2164 return ret; 2165 } 2166 } 2167 } 2168 return ret; 2169 } 2170 2171 static int 2172 _bnxt_flow_destroy(struct bnxt *bp, 2173 struct rte_flow *flow, 2174 struct rte_flow_error *error) 2175 { 2176 struct bnxt_filter_info *filter; 2177 struct bnxt_vnic_info *vnic; 2178 int ret = 0; 2179 uint32_t flow_id; 2180 2181 filter = flow->filter; 2182 vnic = flow->vnic; 2183 2184 if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER && 2185 filter->enables == filter->tunnel_type) { 2186 ret = bnxt_handle_tunnel_redirect_destroy(bp, filter, error); 2187 if (!ret) 2188 goto done; 2189 else 2190 return ret; 2191 } 2192 2193 /* For config type, there is no filter in HW. 
Finish cleanup here */ 2194 if (filter->filter_type == HWRM_CFA_CONFIG) 2195 goto done; 2196 2197 ret = bnxt_match_filter(bp, filter); 2198 if (ret == 0) 2199 PMD_DRV_LOG(ERR, "Could not find matching flow\n"); 2200 2201 if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) { 2202 flow_id = filter->flow_id & BNXT_FLOW_ID_MASK; 2203 memset(&bp->mark_table[flow_id], 0, 2204 sizeof(bp->mark_table[flow_id])); 2205 filter->flow_id = 0; 2206 } 2207 2208 ret = bnxt_clear_one_vnic_filter(bp, filter); 2209 2210 done: 2211 if (!ret) { 2212 /* If it is a L2 drop filter, when the filter is created, 2213 * the FW updates the BC/MC records. 2214 * Once this filter is removed, issue the set_rx_mask command 2215 * to reset the BC/MC records in the HW to the settings 2216 * before the drop counter is created. 2217 */ 2218 if (filter->valid_flags & BNXT_FLOW_L2_DROP_FLAG) 2219 bnxt_set_rx_mask_no_vlan(bp, &bp->vnic_info[0]); 2220 2221 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next); 2222 bnxt_free_filter(bp, filter); 2223 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next); 2224 rte_free(flow); 2225 if (BNXT_FLOW_XSTATS_EN(bp)) 2226 bp->flow_stat->flow_count--; 2227 2228 /* If this was the last flow associated with this vnic, 2229 * switch the queue back to RSS pool. 2230 */ 2231 if (vnic && !vnic->func_default && 2232 STAILQ_EMPTY(&vnic->flow_list)) { 2233 bnxt_vnic_cleanup(bp, vnic); 2234 bp->nr_vnics--; 2235 } 2236 } else { 2237 rte_flow_error_set(error, -ret, 2238 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2239 "Failed to destroy flow."); 2240 } 2241 2242 return ret; 2243 } 2244 2245 static int 2246 bnxt_flow_destroy(struct rte_eth_dev *dev, 2247 struct rte_flow *flow, 2248 struct rte_flow_error *error) 2249 { 2250 struct bnxt *bp = dev->data->dev_private; 2251 int ret = 0; 2252 2253 bnxt_acquire_flow_lock(bp); 2254 if (!flow) { 2255 rte_flow_error_set(error, EINVAL, 2256 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2257 "Invalid flow: failed to destroy flow."); 2258 bnxt_release_flow_lock(bp); 2259 return -EINVAL; 2260 } 2261 2262 if (!flow->filter) { 2263 rte_flow_error_set(error, EINVAL, 2264 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2265 "Invalid flow: failed to destroy flow."); 2266 bnxt_release_flow_lock(bp); 2267 return -EINVAL; 2268 } 2269 ret = _bnxt_flow_destroy(bp, flow, error); 2270 bnxt_release_flow_lock(bp); 2271 2272 return ret; 2273 } 2274 2275 void bnxt_cancel_fc_thread(struct bnxt *bp) 2276 { 2277 bp->flags &= ~BNXT_FLAG_FC_THREAD; 2278 rte_eal_alarm_cancel(bnxt_flow_cnt_alarm_cb, (void *)bp); 2279 } 2280 2281 static int 2282 bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) 2283 { 2284 struct bnxt *bp = dev->data->dev_private; 2285 struct bnxt_vnic_info *vnic; 2286 struct rte_flow *flow; 2287 unsigned int i; 2288 int ret = 0; 2289 2290 bnxt_acquire_flow_lock(bp); 2291 for (i = 0; i < bp->max_vnics; i++) { 2292 vnic = &bp->vnic_info[i]; 2293 if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID) 2294 continue; 2295 2296 while (!STAILQ_EMPTY(&vnic->flow_list)) { 2297 flow = STAILQ_FIRST(&vnic->flow_list); 2298 2299 if (!flow->filter) 2300 continue; 2301 2302 ret = _bnxt_flow_destroy(bp, flow, error); 2303 if (ret) 2304 break; 2305 } 2306 } 2307 2308 bnxt_cancel_fc_thread(bp); 2309 bnxt_release_flow_lock(bp); 2310 2311 return ret; 2312 } 2313 2314 const struct rte_flow_ops bnxt_flow_ops = { 2315 .validate = bnxt_flow_validate, 2316 .create = bnxt_flow_create, 2317 .destroy = bnxt_flow_destroy, 2318 .flush = bnxt_flow_flush, 2319 }; 2320
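
/*
 * Usage sketch (illustrative only, not built with the PMD): how an
 * application could request a flow that the parser above turns into an
 * ntuple filter steering IPv4/TCP traffic with a given destination port
 * to a specific Rx queue. The guard macro, helper name and parameters
 * below are examples, not part of the driver; the rte_flow.h include at
 * the top of this file already provides the APIs used here.
 */
#ifdef BNXT_FLOW_USAGE_EXAMPLE
static struct rte_flow *
bnxt_example_redirect_tcp_dport(uint16_t port_id, uint16_t queue_id,
				rte_be16_t tcp_dport,
				struct rte_flow_error *error)
{
	/* Ingress only: bnxt_flow_parse_attr() accepts ingress flows only. */
	const struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_tcp tcp_spec = {
		.hdr = { .dst_port = tcp_dport },
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr = { .dst_port = RTE_BE16(0xffff) },
	};
	/* ETH/IPV4 items with NULL spec only select the protocol stack;
	 * the TCP item carries the actual match.
	 */
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/* Note: this PMD rejects queue index 0 for the QUEUE action. */
	const struct rte_flow_action_queue queue = { .index = queue_id };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, error) != 0)
		return NULL;

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}
#endif /* BNXT_FLOW_USAGE_EXAMPLE */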