/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_memcpy.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_bus_pci.h>

#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "rte_pmd_txgbe.h"

#define TXGBE_MAX_VFTA     (128)
#define TXGBE_VF_MSG_SIZE_DEFAULT 1
#define TXGBE_VF_GET_QUEUE_MSG_SIZE 5

static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	return pci_dev->max_vfs;
}

static inline
int txgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
{
	unsigned char vf_mac_addr[RTE_ETHER_ADDR_LEN];
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
	uint16_t vfn;

	for (vfn = 0; vfn < vf_num; vfn++) {
		rte_eth_random_addr(vf_mac_addr);
		/* keep the random address as default */
		memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
			RTE_ETHER_ADDR_LEN);
	}

	return 0;
}

static inline int
txgbe_mb_intr_setup(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	intr->mask_misc |= TXGBE_ICRMISC_VFMBX;

	return 0;
}

int txgbe_pf_host_init(struct rte_eth_dev *eth_dev)
{
	struct txgbe_vf_info **vfinfo = TXGBE_DEV_VFDATA(eth_dev);
	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(eth_dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	uint16_t vf_num;
	uint8_t nb_queue;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return ret;

	*vfinfo = rte_zmalloc("vf_info",
			sizeof(struct txgbe_vf_info) * vf_num, 0);
	if (*vfinfo == NULL) {
		PMD_INIT_LOG(ERR,
			"Cannot allocate memory for private VF data\n");
		return -ENOMEM;
	}

	ret = rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id);
	if (ret) {
		PMD_INIT_LOG(ERR,
			"failed to allocate switch domain: %d", ret);
		rte_free(*vfinfo);
		*vfinfo = NULL;
		return ret;
	}

	memset(uta_info, 0, sizeof(struct txgbe_uta_info));
	hw->mac.mc_filter_type = 0;

	if (vf_num >= RTE_ETH_32_POOLS) {
		nb_queue = 2;
		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
	} else if (vf_num >= RTE_ETH_16_POOLS) {
		nb_queue = 4;
		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
	} else {
		nb_queue = 8;
		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
	}

	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx =
			(uint16_t)(vf_num * nb_queue);

	txgbe_vf_perm_addr_gen(eth_dev, vf_num);

	/* init_mailbox_params */
	hw->mbx.init_params(hw);

	/* set mb interrupt mask */
	txgbe_mb_intr_setup(eth_dev);

	return ret;
}

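/*
 * Tear down what txgbe_pf_host_init() set up: clear the SR-IOV queue
 * layout advertised in RTE_ETH_DEV_SRIOV, free the switch domain and
 * release the per-VF private data array.
 */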
void txgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
{
	struct txgbe_vf_info **vfinfo;
	uint16_t vf_num;
	int ret;

	PMD_INIT_FUNC_TRACE();

	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;

	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return;

	vfinfo = TXGBE_DEV_VFDATA(eth_dev);
	if (*vfinfo == NULL)
		return;

	ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id);
	if (ret)
		PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);

	rte_free(*vfinfo);
	*vfinfo = NULL;
}

static void
txgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
	uint16_t vf_num;
	int i;
	struct txgbe_ethertype_filter ethertype_filter;

	if (!hw->mac.set_ethertype_anti_spoofing) {
		PMD_DRV_LOG(INFO, "ether type anti-spoofing is not supported.\n");
		return;
	}

	i = txgbe_ethertype_filter_lookup(filter_info,
					  TXGBE_ETHERTYPE_FLOW_CTRL);
	if (i >= 0) {
		PMD_DRV_LOG(ERR, "An ether type filter entity for flow control already exists!\n");
		return;
	}

	ethertype_filter.ethertype = TXGBE_ETHERTYPE_FLOW_CTRL;
	ethertype_filter.etqf = TXGBE_ETFLT_ENA |
				TXGBE_ETFLT_TXAS |
				TXGBE_ETHERTYPE_FLOW_CTRL;
	ethertype_filter.etqs = 0;
	ethertype_filter.conf = TRUE;
	i = txgbe_ethertype_filter_insert(filter_info,
					  &ethertype_filter);
	if (i < 0) {
		PMD_DRV_LOG(ERR, "Cannot find an unused ether type filter entity for flow control.\n");
		return;
	}

	wr32(hw, TXGBE_ETFLT(i),
	     (TXGBE_ETFLT_ENA |
	      TXGBE_ETFLT_TXAS |
	      TXGBE_ETHERTYPE_FLOW_CTRL));

	vf_num = dev_num_vf(eth_dev);
	for (i = 0; i < vf_num; i++)
		hw->mac.set_ethertype_anti_spoofing(hw, true, i);
}

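/*
 * Program the PF side of SR-IOV: enable VMDq with the PF default pool,
 * open the RX/TX pool-enable bits reserved to the PF, mirror the pool
 * count into PORTCTL, enable VLAN filtering, MAC anti-spoofing and the
 * flow-control drop filter for all VFs.
 */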
int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
{
	uint32_t vtctl, fcrth;
	uint32_t vfre_slot, vfre_offset;
	uint16_t vf_num;
	const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	uint32_t gpie;
	uint32_t gcr_ext;
	uint32_t vlanctrl;
	int i;

	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return -1;

	/* enable VMDq and set the default pool for PF */
	vtctl = rd32(hw, TXGBE_POOLCTL);
	vtctl &= ~TXGBE_POOLCTL_DEFPL_MASK;
	vtctl |= TXGBE_POOLCTL_DEFPL(RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);
	vtctl |= TXGBE_POOLCTL_RPLEN;
	wr32(hw, TXGBE_POOLCTL, vtctl);

	vfre_offset = vf_num & VFRE_MASK;
	vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;

	/* Enable pools reserved to PF only */
	wr32(hw, TXGBE_POOLRXENA(vfre_slot), (~0U) << vfre_offset);
	wr32(hw, TXGBE_POOLRXENA(vfre_slot ^ 1), vfre_slot - 1);
	wr32(hw, TXGBE_POOLTXENA(vfre_slot), (~0U) << vfre_offset);
	wr32(hw, TXGBE_POOLTXENA(vfre_slot ^ 1), vfre_slot - 1);

	wr32(hw, TXGBE_PSRCTL, TXGBE_PSRCTL_LBENA);

	/* clear VMDq map to permanent rar 0 */
	hw->mac.clear_vmdq(hw, 0, BIT_MASK32);

	/* clear VMDq map to scan rar 127 */
	wr32(hw, TXGBE_ETHADDRIDX, hw->mac.num_rar_entries);
	wr32(hw, TXGBE_ETHADDRASSL, 0);
	wr32(hw, TXGBE_ETHADDRASSH, 0);

	/* set VMDq map to default PF pool */
	hw->mac.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);

	/*
	 * SW must set PORTCTL.VT_Mode the same as GPIE.VT_Mode
	 */
	gpie = rd32(hw, TXGBE_GPIE);
	gpie |= TXGBE_GPIE_MSIX;
	gcr_ext = rd32(hw, TXGBE_PORTCTL);
	gcr_ext &= ~TXGBE_PORTCTL_NUMVT_MASK;

	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
	case RTE_ETH_64_POOLS:
		gcr_ext |= TXGBE_PORTCTL_NUMVT_64;
		break;
	case RTE_ETH_32_POOLS:
		gcr_ext |= TXGBE_PORTCTL_NUMVT_32;
		break;
	case RTE_ETH_16_POOLS:
		gcr_ext |= TXGBE_PORTCTL_NUMVT_16;
		break;
	}

	wr32(hw, TXGBE_PORTCTL, gcr_ext);
	wr32(hw, TXGBE_GPIE, gpie);

	/*
	 * enable vlan filtering and allow all vlan tags through
	 */
	vlanctrl = rd32(hw, TXGBE_VLANCTL);
	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
	wr32(hw, TXGBE_VLANCTL, vlanctrl);

	/* enable all vlan filters */
	for (i = 0; i < TXGBE_MAX_VFTA; i++)
		wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);

	/* Enable MAC Anti-Spoofing */
	hw->mac.set_mac_anti_spoofing(hw, FALSE, vf_num);

	/* set flow control threshold to max to avoid tx switch hang */
	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
		wr32(hw, TXGBE_FCWTRLO(i), 0);
		fcrth = rd32(hw, TXGBE_PBRXSIZE(i)) - 32;
		wr32(hw, TXGBE_FCWTRHI(i), fcrth);
	}

	txgbe_add_tx_flow_control_drop_filter(eth_dev);

	return 0;
}

static void
txgbe_set_rx_mode(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_dev_data *dev_data = eth_dev->data;
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	u32 fctrl, vmolr;
	uint16_t vfn = dev_num_vf(eth_dev);

	/* disable store-bad-packets */
	wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_SAVEBAD, 0);

	/* Check for Promiscuous and All Multicast modes */
	fctrl = rd32m(hw, TXGBE_PSRCTL,
			~(TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP));
	fctrl |= TXGBE_PSRCTL_BCA |
		 TXGBE_PSRCTL_MCHFENA;

	vmolr = rd32m(hw, TXGBE_POOLETHCTL(vfn),
			~(TXGBE_POOLETHCTL_UCP |
			  TXGBE_POOLETHCTL_MCP |
			  TXGBE_POOLETHCTL_UCHA |
			  TXGBE_POOLETHCTL_MCHA));
	vmolr |= TXGBE_POOLETHCTL_BCA |
		 TXGBE_POOLETHCTL_UTA |
		 TXGBE_POOLETHCTL_VLA;

	if (dev_data->promiscuous) {
		fctrl |= TXGBE_PSRCTL_UCP |
			 TXGBE_PSRCTL_MCP;
		/* PF doesn't want packets routed to the VF, so clear UPE */
		vmolr |= TXGBE_POOLETHCTL_MCP;
	} else if (dev_data->all_multicast) {
		fctrl |= TXGBE_PSRCTL_MCP;
		vmolr |= TXGBE_POOLETHCTL_MCP;
	} else {
		vmolr |= TXGBE_POOLETHCTL_UCHA;
		vmolr |= TXGBE_POOLETHCTL_MCHA;
	}

	wr32(hw, TXGBE_POOLETHCTL(vfn), vmolr);

	wr32(hw, TXGBE_PSRCTL, fctrl);

	txgbe_vlan_hw_strip_config(eth_dev);
}

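/*
 * Restore a VF pool to its post-reset state: re-enable unicast hash,
 * broadcast and UTA acceptance, clear its port VLAN tag and multicast
 * hash list, and release the RAR entry dedicated to this VF until the
 * VF reloads its MAC address.
 */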
static inline void
txgbe_vf_reset_event(struct rte_eth_dev *eth_dev, uint16_t vf)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint32_t vmolr = rd32(hw, TXGBE_POOLETHCTL(vf));

	vmolr |= (TXGBE_POOLETHCTL_UCHA |
			TXGBE_POOLETHCTL_BCA | TXGBE_POOLETHCTL_UTA);
	wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);

	wr32(hw, TXGBE_POOLTAG(vf), 0);

	/* reset multicast table array for vf */
	vfinfo[vf].num_vf_mc_hashes = 0;

	/* reset rx mode */
	txgbe_set_rx_mode(eth_dev);

	hw->mac.clear_rar(hw, rar_entry);
}

static inline void
txgbe_vf_reset_msg(struct rte_eth_dev *eth_dev, uint16_t vf)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	uint32_t reg;
	uint32_t reg_offset, vf_shift;
	const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
	uint8_t nb_q_per_pool;
	int i;

	vf_shift = vf & VFRE_MASK;
	reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;

	/* enable transmit for vf */
	reg = rd32(hw, TXGBE_POOLTXENA(reg_offset));
	reg |= (reg | (1 << vf_shift));
	wr32(hw, TXGBE_POOLTXENA(reg_offset), reg);

	/* enable all queue drop for IOV */
	nb_q_per_pool = RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool;
	for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
		txgbe_flush(hw);
		reg = 1 << (i % 32);
		wr32m(hw, TXGBE_QPRXDROP(i / 32), reg, reg);
	}

	/* enable receive for vf */
	reg = rd32(hw, TXGBE_POOLRXENA(reg_offset));
	reg |= (reg | (1 << vf_shift));
	wr32(hw, TXGBE_POOLRXENA(reg_offset), reg);

	txgbe_vf_reset_event(eth_dev, vf);
}

static int
txgbe_disable_vf_mc_promisc(struct rte_eth_dev *eth_dev, uint32_t vf)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	uint32_t vmolr;

	vmolr = rd32(hw, TXGBE_POOLETHCTL(vf));

	PMD_DRV_LOG(INFO, "VF %u: disabling multicast promiscuous\n", vf);

	vmolr &= ~TXGBE_POOLETHCTL_MCP;

	wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);

	return 0;
}

static int
txgbe_vf_reset(struct rte_eth_dev *eth_dev, uint16_t vf, uint32_t *msgbuf)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
	unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

	txgbe_vf_reset_msg(eth_dev, vf);

	hw->mac.set_rar(hw, rar_entry, vf_mac, vf, true);

	/* Disable multicast promiscuous at reset */
	txgbe_disable_vf_mc_promisc(eth_dev, vf);

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = TXGBE_VF_RESET | TXGBE_VT_MSGTYPE_ACK;
	rte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
	/*
	 * Piggyback the multicast filter type so VF can compute the
	 * correct vectors
	 */
	msgbuf[3] = hw->mac.mc_filter_type;
	txgbe_write_mbx(hw, msgbuf, TXGBE_VF_PERMADDR_MSG_LEN, vf);

	return 0;
}

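/*
 * Handle TXGBE_VF_SET_MAC_ADDR: validate the MAC carried in the mailbox
 * message and, if it is a valid unicast address, store it and program
 * the RAR entry reserved for this VF.
 */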
static int
txgbe_vf_set_mac_addr(struct rte_eth_dev *eth_dev,
		uint32_t vf, uint32_t *msgbuf)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
	struct rte_ether_addr *ea = (struct rte_ether_addr *)new_mac;

	if (rte_is_valid_assigned_ether_addr(ea)) {
		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
			   RTE_ETHER_ADDR_LEN);
		return hw->mac.set_rar(hw, rar_entry, new_mac, vf, true);
	}
	return -1;
}

static int
txgbe_vf_set_multicast(struct rte_eth_dev *eth_dev,
		uint32_t vf, uint32_t *msgbuf)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
	int nb_entries = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) >>
		TXGBE_VT_MSGINFO_SHIFT;
	uint16_t *hash_list = (uint16_t *)&msgbuf[1];
	uint32_t mta_idx;
	uint32_t mta_shift;
	const uint32_t TXGBE_MTA_INDEX_MASK = 0x7F;
	const uint32_t TXGBE_MTA_BIT_SHIFT = 5;
	const uint32_t TXGBE_MTA_BIT_MASK = (0x1 << TXGBE_MTA_BIT_SHIFT) - 1;
	uint32_t reg_val;
	int i;
	u32 vmolr = rd32(hw, TXGBE_POOLETHCTL(vf));

	/* Disable multicast promiscuous first */
	txgbe_disable_vf_mc_promisc(eth_dev, vf);

	/* only so many hash values supported */
	nb_entries = RTE_MIN(nb_entries, TXGBE_MAX_VF_MC_ENTRIES);

	/* store the mc entries in this VF's slot */
	vfinfo[vf].num_vf_mc_hashes = (uint16_t)nb_entries;
	for (i = 0; i < nb_entries; i++)
		vfinfo[vf].vf_mc_hashes[i] = hash_list[i];

	if (nb_entries == 0) {
		vmolr &= ~TXGBE_POOLETHCTL_MCHA;
		wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);
		return 0;
	}

	for (i = 0; i < vfinfo[vf].num_vf_mc_hashes; i++) {
		mta_idx = (vfinfo[vf].vf_mc_hashes[i] >> TXGBE_MTA_BIT_SHIFT)
				& TXGBE_MTA_INDEX_MASK;
		mta_shift = vfinfo[vf].vf_mc_hashes[i] & TXGBE_MTA_BIT_MASK;
		reg_val = rd32(hw, TXGBE_MCADDRTBL(mta_idx));
		reg_val |= (1 << mta_shift);
		wr32(hw, TXGBE_MCADDRTBL(mta_idx), reg_val);
	}

	vmolr |= TXGBE_POOLETHCTL_MCHA;
	wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);

	return 0;
}

static int
txgbe_vf_set_vlan(struct rte_eth_dev *eth_dev, uint32_t vf, uint32_t *msgbuf)
{
	int add, vid;
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));

	add = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK)
		>> TXGBE_VT_MSGINFO_SHIFT;
	vid = TXGBE_PSRVLAN_VID(msgbuf[1]);

	if (add)
		vfinfo[vf].vlan_count++;
	else if (vfinfo[vf].vlan_count)
		vfinfo[vf].vlan_count--;
	return hw->mac.set_vfta(hw, vid, vf, (bool)add, false);
}

static int
txgbe_set_vf_lpe(struct rte_eth_dev *eth_dev,
		__rte_unused uint32_t vf, uint32_t *msgbuf)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	uint32_t max_frame = msgbuf[1];
	uint32_t max_frs;

	if (max_frame < RTE_ETHER_MIN_LEN ||
	    max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
		return -1;

	max_frs = rd32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK);
	if (max_frs < max_frame) {
		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
			TXGBE_FRMSZ_MAX(max_frame));
	}

	return 0;
}

static int
txgbe_negotiate_vf_api(struct rte_eth_dev *eth_dev,
		uint32_t vf, uint32_t *msgbuf)
{
	uint32_t api_version = msgbuf[1];
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(eth_dev);

	switch (api_version) {
	case txgbe_mbox_api_10:
	case txgbe_mbox_api_11:
	case txgbe_mbox_api_12:
	case txgbe_mbox_api_13:
		vfinfo[vf].api_version = (uint8_t)api_version;
		return 0;
	default:
		break;
	}

	PMD_DRV_LOG(ERR, "Negotiate invalid api version %u from VF %d\n",
		api_version, vf);

	return -1;
}

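/*
 * Handle TXGBE_VF_GET_QUEUES: report the per-pool RX/TX queue count,
 * the VF's first (default) queue index and the number of traffic
 * classes, derived from the PF TX multi-queue mode.
 */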
static int
txgbe_get_vf_queues(struct rte_eth_dev *eth_dev, uint32_t vf, uint32_t *msgbuf)
{
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(eth_dev);
	uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool;
	struct rte_eth_conf *eth_conf;
	struct rte_eth_vmdq_dcb_tx_conf *vmdq_dcb_tx_conf;
	u8 num_tcs;
	struct txgbe_hw *hw;
	u32 vmvir;
	u32 vlana;
	u32 vid;
	u32 user_priority;

	/* Verify if the PF supports the mbox APIs version or not */
	switch (vfinfo[vf].api_version) {
	case txgbe_mbox_api_20:
	case txgbe_mbox_api_11:
	case txgbe_mbox_api_12:
	case txgbe_mbox_api_13:
		break;
	default:
		return -1;
	}

	/* Notify VF of Rx and Tx queue number */
	msgbuf[TXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool;
	msgbuf[TXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool;

	/* Notify VF of default queue */
	msgbuf[TXGBE_VF_DEF_QUEUE] = default_q;

	/* Notify VF of number of DCB traffic classes */
	eth_conf = &eth_dev->data->dev_conf;
	switch (eth_conf->txmode.mq_mode) {
	case RTE_ETH_MQ_TX_NONE:
	case RTE_ETH_MQ_TX_DCB:
		PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
			", but its tx mode = %d\n", vf,
			eth_conf->txmode.mq_mode);
		return -1;

	case RTE_ETH_MQ_TX_VMDQ_DCB:
		vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
		switch (vmdq_dcb_tx_conf->nb_queue_pools) {
		case RTE_ETH_16_POOLS:
			num_tcs = RTE_ETH_8_TCS;
			break;
		case RTE_ETH_32_POOLS:
			num_tcs = RTE_ETH_4_TCS;
			break;
		default:
			return -1;
		}
		break;

	/* RTE_ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
	case RTE_ETH_MQ_TX_VMDQ_ONLY:
		hw = TXGBE_DEV_HW(eth_dev);
		vmvir = rd32(hw, TXGBE_POOLTAG(vf));
		vlana = vmvir & TXGBE_POOLTAG_ACT_MASK;
		vid = vmvir & TXGBE_POOLTAG_VTAG_MASK;
		user_priority =
			TXGBD_POOLTAG_VTAG_UP(vmvir);
		if (vlana == TXGBE_POOLTAG_ACT_ALWAYS &&
			(vid != 0 || user_priority != 0))
			num_tcs = 1;
		else
			num_tcs = 0;
		break;

	default:
		PMD_DRV_LOG(ERR, "PF works with invalid mode = %d\n",
			eth_conf->txmode.mq_mode);
		return -1;
	}
	msgbuf[TXGBE_VF_TRANS_VLAN] = num_tcs;

	return 0;
}

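/*
 * Handle TXGBE_VF_UPDATE_XCAST_MODE: map the requested xcast mode
 * (none/multi/allmulti/promisc) onto the pool's POOLETHCTL bits.
 * Promiscuous mode needs mailbox API 1.3 or later and requires the PF
 * itself to be in promiscuous mode.
 */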
static int
txgbe_set_vf_mc_promisc(struct rte_eth_dev *eth_dev,
		uint32_t vf, uint32_t *msgbuf)
{
	struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	int xcast_mode = msgbuf[1];	/* msgbuf contains the flag to enable */
	u32 vmolr, fctrl, disable, enable;

	switch (vfinfo[vf].api_version) {
	case txgbe_mbox_api_12:
		/* promisc introduced in 1.3 version */
		if (xcast_mode == TXGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		break;
		/* Fall through */
	case txgbe_mbox_api_13:
		break;
	default:
		return -1;
	}

	if (vfinfo[vf].xcast_mode == xcast_mode)
		goto out;

	switch (xcast_mode) {
	case TXGBEVF_XCAST_MODE_NONE:
		disable = TXGBE_POOLETHCTL_BCA | TXGBE_POOLETHCTL_MCHA |
			  TXGBE_POOLETHCTL_MCP | TXGBE_POOLETHCTL_UCP |
			  TXGBE_POOLETHCTL_VLP;
		enable = 0;
		break;
	case TXGBEVF_XCAST_MODE_MULTI:
		disable = TXGBE_POOLETHCTL_MCP | TXGBE_POOLETHCTL_UCP |
			  TXGBE_POOLETHCTL_VLP;
		enable = TXGBE_POOLETHCTL_BCA | TXGBE_POOLETHCTL_MCHA;
		break;
	case TXGBEVF_XCAST_MODE_ALLMULTI:
		disable = TXGBE_POOLETHCTL_UCP | TXGBE_POOLETHCTL_VLP;
		enable = TXGBE_POOLETHCTL_BCA | TXGBE_POOLETHCTL_MCHA |
			 TXGBE_POOLETHCTL_MCP;
		break;
	case TXGBEVF_XCAST_MODE_PROMISC:
		fctrl = rd32(hw, TXGBE_PSRCTL);
		if (!(fctrl & TXGBE_PSRCTL_UCP)) {
			/* VF promisc requires PF in promisc */
			PMD_DRV_LOG(ERR,
				"Enabling VF promisc requires PF in promisc\n");
			return -1;
		}

		disable = 0;
		enable = TXGBE_POOLETHCTL_BCA | TXGBE_POOLETHCTL_MCHA |
			 TXGBE_POOLETHCTL_MCP | TXGBE_POOLETHCTL_UCP |
			 TXGBE_POOLETHCTL_VLP;
		break;
	default:
		return -1;
	}

	vmolr = rd32(hw, TXGBE_POOLETHCTL(vf));
	vmolr &= ~disable;
	vmolr |= enable;
	wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);
	vfinfo[vf].xcast_mode = xcast_mode;

out:
	msgbuf[1] = xcast_mode;

	return 0;
}

static int
txgbe_set_vf_macvlan_msg(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vf_info *vf_info = *(TXGBE_DEV_VFDATA(dev));
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
	struct rte_ether_addr *ea = (struct rte_ether_addr *)new_mac;
	int index = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) >>
		    TXGBE_VT_MSGINFO_SHIFT;

	if (index) {
		if (!rte_is_valid_assigned_ether_addr(ea)) {
			PMD_DRV_LOG(ERR, "set invalid mac vf:%d\n", vf);
			return -1;
		}

		vf_info[vf].mac_count++;

		hw->mac.set_rar(hw, vf_info[vf].mac_count,
				new_mac, vf, true);
	} else {
		if (vf_info[vf].mac_count) {
			hw->mac.clear_rar(hw, vf_info[vf].mac_count);
			vf_info[vf].mac_count = 0;
		}
	}
	return 0;
}

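/*
 * Read one mailbox message from a VF and dispatch it. A VF reset is
 * always handled; every other request is first passed to the application
 * through RTE_ETH_EVENT_VF_MBOX, which may veto it, and the result is
 * reported back to the VF with ACK or NACK.
 */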
static int
txgbe_rcv_msg_from_vf(struct rte_eth_dev *eth_dev, uint16_t vf)
{
	uint16_t mbx_size = TXGBE_P2VMBX_SIZE;
	uint16_t msg_size = TXGBE_VF_MSG_SIZE_DEFAULT;
	uint32_t msgbuf[TXGBE_P2VMBX_SIZE];
	int32_t retval;
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(eth_dev);
	struct rte_pmd_txgbe_mb_event_param ret_param;

	retval = txgbe_read_mbx(hw, msgbuf, mbx_size, vf);
	if (retval) {
		PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf);
		return retval;
	}

	/* do nothing if the message has already been processed */
	if (msgbuf[0] & (TXGBE_VT_MSGTYPE_ACK | TXGBE_VT_MSGTYPE_NACK))
		return retval;

	/* flush the ack before we write any messages back */
	txgbe_flush(hw);

	/**
	 * initialise structure to send to user application
	 * will return response from user in retval field
	 */
	ret_param.retval = RTE_PMD_TXGBE_MB_EVENT_PROCEED;
	ret_param.vfid = vf;
	ret_param.msg_type = msgbuf[0] & 0xFFFF;
	ret_param.msg = (void *)msgbuf;

	/* perform VF reset */
	if (msgbuf[0] == TXGBE_VF_RESET) {
		int ret = txgbe_vf_reset(eth_dev, vf, msgbuf);

		vfinfo[vf].clear_to_send = true;

		/* notify application about VF reset */
		rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_VF_MBOX,
					     &ret_param);
		return ret;
	}

	/**
	 * ask user application if we are allowed to perform those functions:
	 * if ret_param.retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED,
	 * then business as usual;
	 * if 0, do nothing and send ACK to VF;
	 * if ret_param.retval > 1, do nothing and send NACK to VF.
	 */
	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_VF_MBOX,
				     &ret_param);

	retval = ret_param.retval;

	/* check & process VF to PF mailbox message */
	switch ((msgbuf[0] & 0xFFFF)) {
	case TXGBE_VF_SET_MAC_ADDR:
		if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
			retval = txgbe_vf_set_mac_addr(eth_dev, vf, msgbuf);
		break;
	case TXGBE_VF_SET_MULTICAST:
		if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
			retval = txgbe_vf_set_multicast(eth_dev, vf, msgbuf);
		break;
	case TXGBE_VF_SET_LPE:
		if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
			retval = txgbe_set_vf_lpe(eth_dev, vf, msgbuf);
		break;
	case TXGBE_VF_SET_VLAN:
		if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
			retval = txgbe_vf_set_vlan(eth_dev, vf, msgbuf);
		break;
	case TXGBE_VF_API_NEGOTIATE:
		retval = txgbe_negotiate_vf_api(eth_dev, vf, msgbuf);
		break;
	case TXGBE_VF_GET_QUEUES:
		retval = txgbe_get_vf_queues(eth_dev, vf, msgbuf);
		msg_size = TXGBE_VF_GET_QUEUE_MSG_SIZE;
		break;
	case TXGBE_VF_UPDATE_XCAST_MODE:
		if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
			retval = txgbe_set_vf_mc_promisc(eth_dev, vf, msgbuf);
		break;
	case TXGBE_VF_SET_MACVLAN:
		if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
			retval = txgbe_set_vf_macvlan_msg(eth_dev, vf, msgbuf);
		break;
	default:
		PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (uint32_t)msgbuf[0]);
		retval = TXGBE_ERR_MBX;
		break;
	}

	/* respond to the VF according to the message processing result */
	if (retval)
		msgbuf[0] |= TXGBE_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= TXGBE_VT_MSGTYPE_ACK;

	msgbuf[0] |= TXGBE_VT_MSGTYPE_CTS;

	txgbe_write_mbx(hw, msgbuf, msg_size, vf);

	return retval;
}

static inline void
txgbe_rcv_ack_from_vf(struct rte_eth_dev *eth_dev, uint16_t vf)
{
	uint32_t msg = TXGBE_VT_MSGTYPE_NACK;
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(eth_dev);

	if (!vfinfo[vf].clear_to_send)
		txgbe_write_mbx(hw, &msg, 1, vf);
}

void txgbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
{
	uint16_t vf;
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);

	for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
		/* check & process vf function level reset */
		if (!txgbe_check_for_rst(hw, vf))
			txgbe_vf_reset_event(eth_dev, vf);

		/* check & process vf mailbox messages */
		if (!txgbe_check_for_msg(hw, vf))
			txgbe_rcv_msg_from_vf(eth_dev, vf);

		/* check & process acks from vf */
		if (!txgbe_check_for_ack(hw, vf))
			txgbe_rcv_ack_from_vf(eth_dev, vf);
	}
}