/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_alarm.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include "iavf.h"
#include "iavf_rxtx.h"
#include "iavf_generic_flow.h"
#include "rte_pmd_iavf.h"
#include "iavf_ipsec_crypto.h"

/* devargs */
#define IAVF_PROTO_XTR_ARG	"proto_xtr"

static const char * const iavf_valid_args[] = {
	IAVF_PROTO_XTR_ARG,
	NULL
};

static const struct rte_mbuf_dynfield iavf_proto_xtr_metadata_param = {
	.name = "intel_pmd_dynfield_proto_xtr_metadata",
	.size = sizeof(uint32_t),
	.align = __alignof__(uint32_t),
	.flags = 0,
};

struct iavf_proto_xtr_ol {
	const struct rte_mbuf_dynflag param;
	uint64_t *ol_flag;
	bool required;
};

static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
	[IAVF_PROTO_XTR_VLAN] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" },
		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_vlan_mask },
	[IAVF_PROTO_XTR_IPV4] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" },
		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask },
	[IAVF_PROTO_XTR_IPV6] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" },
		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask },
	[IAVF_PROTO_XTR_IPV6_FLOW] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" },
		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask },
	[IAVF_PROTO_XTR_TCP] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" },
		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_tcp_mask },
	[IAVF_PROTO_XTR_IP_OFFSET] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
	[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
		.param = {
		.name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
		.ol_flag =
			&rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
};

static int iavf_dev_configure(struct rte_eth_dev *dev);
static int iavf_dev_start(struct rte_eth_dev *dev);
static int iavf_dev_stop(struct rte_eth_dev *dev);
static int iavf_dev_close(struct rte_eth_dev *dev);
static int iavf_dev_reset(struct rte_eth_dev *dev);
static int iavf_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int iavf_dev_stats_get(struct rte_eth_dev *dev,
			      struct rte_eth_stats *stats);
static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
static int iavf_dev_xstats_reset(struct rte_eth_dev *dev);
static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
			       struct rte_eth_xstat *xstats, unsigned int n);
static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
				     struct rte_eth_xstat_name *xstats_names,
				     unsigned int limit);
static int iavf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int iavf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int iavf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int iavf_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int iavf_dev_add_mac_addr(struct rte_eth_dev *dev,
				 struct rte_ether_addr *addr,
				 uint32_t index,
				 uint32_t pool);
static void iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int iavf_dev_vlan_filter_set(struct rte_eth_dev *dev,
				    uint16_t vlan_id, int on);
static int iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta_entry64 *reta_conf,
				    uint16_t reta_size);
static int iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);
static int iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_conf *rss_conf);
static int iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);
static int iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
					 struct rte_ether_addr *mac_addr);
static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					  uint16_t queue_id);
static int iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
				 const struct rte_flow_ops **ops);
static int iavf_set_mc_addr_list(struct rte_eth_dev *dev,
				 struct rte_ether_addr *mc_addrs,
				 uint32_t mc_addrs_num);
static int iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg);

static const struct rte_pci_id pci_id_iavf_map[] = {
	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF) },
	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF_HV) },
	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_VF) },
	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_A0_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

struct rte_iavf_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

#define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a)
static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
	{"rx_bytes", _OFF_OF(eth_stats.rx_bytes)},
	{"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)},
	{"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)},
	{"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)},
	{"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)},
	{"rx_unknown_protocol_packets", _OFF_OF(eth_stats.rx_unknown_protocol)},
	{"tx_bytes", _OFF_OF(eth_stats.tx_bytes)},
	{"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)},
	{"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)},
	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},

	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
	{"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)},
	{"inline_ipsec_crypto_ierrors_sad_lookup",
		_OFF_OF(ips_stats.ierrors.sad_miss)},
	{"inline_ipsec_crypto_ierrors_not_processed",
		_OFF_OF(ips_stats.ierrors.not_processed)},
	{"inline_ipsec_crypto_ierrors_icv_fail",
		_OFF_OF(ips_stats.ierrors.icv_check)},
	{"inline_ipsec_crypto_ierrors_length",
		_OFF_OF(ips_stats.ierrors.ipsec_length)},
	{"inline_ipsec_crypto_ierrors_misc",
		_OFF_OF(ips_stats.ierrors.misc)},
};
#undef _OFF_OF

#define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
		sizeof(rte_iavf_stats_strings[0]))

static const struct eth_dev_ops iavf_eth_dev_ops = {
	.dev_configure = iavf_dev_configure,
	.dev_start = iavf_dev_start,
	.dev_stop = iavf_dev_stop,
	.dev_close = iavf_dev_close,
	.dev_reset = iavf_dev_reset,
	.dev_infos_get = iavf_dev_info_get,
	.dev_supported_ptypes_get = iavf_dev_supported_ptypes_get,
	.link_update = iavf_dev_link_update,
	.stats_get = iavf_dev_stats_get,
	.stats_reset = iavf_dev_stats_reset,
	.xstats_get = iavf_dev_xstats_get,
	.xstats_get_names = iavf_dev_xstats_get_names,
	.xstats_reset = iavf_dev_xstats_reset,
	.promiscuous_enable = iavf_dev_promiscuous_enable,
	.promiscuous_disable = iavf_dev_promiscuous_disable,
	.allmulticast_enable = iavf_dev_allmulticast_enable,
	.allmulticast_disable = iavf_dev_allmulticast_disable,
	.mac_addr_add = iavf_dev_add_mac_addr,
	.mac_addr_remove = iavf_dev_del_mac_addr,
	.set_mc_addr_list = iavf_set_mc_addr_list,
	.vlan_filter_set = iavf_dev_vlan_filter_set,
	.vlan_offload_set = iavf_dev_vlan_offload_set,
	.rx_queue_start = iavf_dev_rx_queue_start,
	.rx_queue_stop = iavf_dev_rx_queue_stop,
	.tx_queue_start = iavf_dev_tx_queue_start,
	.tx_queue_stop = iavf_dev_tx_queue_stop,
	.rx_queue_setup = iavf_dev_rx_queue_setup,
	.rx_queue_release = iavf_dev_rx_queue_release,
	.tx_queue_setup = iavf_dev_tx_queue_setup,
	.tx_queue_release = iavf_dev_tx_queue_release,
	.mac_addr_set = iavf_dev_set_default_mac_addr,
	.reta_update = iavf_dev_rss_reta_update,
	.reta_query = iavf_dev_rss_reta_query,
	.rss_hash_update = iavf_dev_rss_hash_update,
	.rss_hash_conf_get = iavf_dev_rss_hash_conf_get,
	.rxq_info_get = iavf_dev_rxq_info_get,
	.txq_info_get = iavf_dev_txq_info_get,
	.mtu_set = iavf_dev_mtu_set,
	.rx_queue_intr_enable = iavf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = iavf_dev_rx_queue_intr_disable,
	.flow_ops_get = iavf_dev_flow_ops_get,
	.tx_done_cleanup = iavf_dev_tx_done_cleanup,
	.get_monitor_addr = iavf_get_monitor_addr,
	.tm_ops_get = iavf_tm_ops_get,
};

static int
iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
		void *arg)
{
	if (!arg)
		return -EINVAL;

	*(const void **)arg = &iavf_tm_ops;

	return 0;
}

__rte_unused
static int
iavf_vfr_inprogress(struct iavf_hw *hw)
{
	int inprogress = 0;

	if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
	     IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
	    VIRTCHNL_VFR_INPROGRESS)
		inprogress = 1;

	if (inprogress)
		PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");

	return inprogress;
}
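
/*
 * Device watchdog: a periodically re-armed EAL alarm callback. While the
 * watchdog is enabled it polls VFGEN_RSTAT to detect a VF reset (VFLR).
 * When a reset is detected it marks the port as in reset and notifies the
 * application via an RTE_ETH_EVENT_INTR_RESET callback; once the reset
 * completes it clears the in-reset flag again.
 */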
__rte_unused
static void
iavf_dev_watchdog(void *cb_arg)
{
	struct iavf_adapter *adapter = cb_arg;
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
	int vfr_inprogress = 0, rc = 0;

	/* check if watchdog has been disabled since last call */
	if (!adapter->vf.watchdog_enabled)
		return;

	/* If in reset then poll vfr_inprogress register for completion */
	if (adapter->vf.vf_reset) {
		vfr_inprogress = iavf_vfr_inprogress(hw);

		if (!vfr_inprogress) {
			PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
				    adapter->vf.eth_dev->data->name);
			adapter->vf.vf_reset = false;
		}
	/* If not in reset then poll vfr_inprogress register for VFLR event */
	} else {
		vfr_inprogress = iavf_vfr_inprogress(hw);

		if (vfr_inprogress) {
			PMD_DRV_LOG(INFO,
				    "VF \"%s\" reset event detected by watchdog",
				    adapter->vf.eth_dev->data->name);

			/* enter reset state with VFLR event */
			adapter->vf.vf_reset = true;

			rte_eth_dev_callback_process(adapter->vf.eth_dev,
				RTE_ETH_EVENT_INTR_RESET, NULL);
		}
	}

	/* re-alarm watchdog */
	rc = rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
			       &iavf_dev_watchdog, cb_arg);

	if (rc)
		PMD_DRV_LOG(ERR, "Failed \"%s\" to reset device watchdog alarm",
			    adapter->vf.eth_dev->data->name);
}

static void
iavf_dev_watchdog_enable(struct iavf_adapter *adapter __rte_unused)
{
#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
	PMD_DRV_LOG(INFO, "Enabling device watchdog");
	adapter->vf.watchdog_enabled = true;
	if (rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
			      &iavf_dev_watchdog, (void *)adapter))
		PMD_DRV_LOG(ERR, "Failed to enable device watchdog");
#endif
}

static void
iavf_dev_watchdog_disable(struct iavf_adapter *adapter __rte_unused)
{
#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
	PMD_DRV_LOG(INFO, "Disabling device watchdog");
	adapter->vf.watchdog_enabled = false;
#endif
}

static int
iavf_set_mc_addr_list(struct rte_eth_dev *dev,
		      struct rte_ether_addr *mc_addrs,
		      uint32_t mc_addrs_num)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	int err, ret;

	if (mc_addrs_num > IAVF_NUM_MACADDR_MAX) {
		PMD_DRV_LOG(ERR,
			    "can't add more than a limited number (%u) of addresses.",
			    (uint32_t)IAVF_NUM_MACADDR_MAX);
		return -EINVAL;
	}

	/* flush previous addresses */
	err = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
					false);
	if (err)
		return err;

	/* add new ones */
	err = iavf_add_del_mc_addr_list(adapter, mc_addrs, mc_addrs_num, true);

	if (err) {
		/* if adding mac address list fails, should add the previous
		 * addresses back.
		 */
		ret = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs,
						vf->mc_addrs_num, true);
		if (ret)
			return ret;
	} else {
		vf->mc_addrs_num = mc_addrs_num;
		memcpy(vf->mc_addrs,
		       mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
	}

	return err;
}
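
/*
 * Translate the RTE_ETH_RSS_* flags requested by the application into the
 * HENA (hash enable) PCTYPE bitmap understood by the hardware: query the
 * supported PCTYPEs from the PF, set the matching bits, and remember the
 * subset of rss_hf that could actually be honoured in vf->rss_hf.
 */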
static void
iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
{
	static const uint64_t map_hena_rss[] = {
		/* IPv4 */
		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
		[IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
				RTE_ETH_RSS_NONFRAG_IPV4_TCP,
		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
				RTE_ETH_RSS_NONFRAG_IPV4_TCP,
		[IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
				RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
		[IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
				RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
		[IAVF_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,

		/* IPv6 */
		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
		[IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
				RTE_ETH_RSS_NONFRAG_IPV6_TCP,
		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
				RTE_ETH_RSS_NONFRAG_IPV6_TCP,
		[IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
				RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
		[IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
				RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
		[IAVF_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,

		/* L2 Payload */
		[IAVF_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
	};

	const uint64_t ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
				  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
				  RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
				  RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
				  RTE_ETH_RSS_FRAG_IPV4;

	const uint64_t ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
				  RTE_ETH_RSS_NONFRAG_IPV6_TCP |
				  RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
				  RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
				  RTE_ETH_RSS_FRAG_IPV6;

	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
	uint32_t i;
	int ret;

	ret = iavf_get_hena_caps(adapter, &caps);
	if (ret) {
		/**
		 * RSS offload type configuration is not a necessary feature
		 * for VF, so here just print a warning and return.
		 */
		PMD_DRV_LOG(WARNING,
			    "fail to get RSS offload type caps, ret: %d", ret);
		return;
	}

	/**
	 * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered as 2
	 * generalizations of all other IPv4 and IPv6 RSS types.
	 */
	if (rss_hf & RTE_ETH_RSS_IPV4)
		rss_hf |= ipv4_rss;

	if (rss_hf & RTE_ETH_RSS_IPV6)
		rss_hf |= ipv6_rss;

	RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);

	for (i = 0; i < RTE_DIM(map_hena_rss); i++) {
		uint64_t bit = BIT_ULL(i);

		if ((caps & bit) && (map_hena_rss[i] & rss_hf)) {
			valid_rss_hf |= map_hena_rss[i];
			hena |= bit;
		}
	}

	ret = iavf_set_hena(adapter, hena);
	if (ret) {
		/**
		 * RSS offload type configuration is not a necessary feature
		 * for VF, so here just print a warning and return.
467 */ 468 PMD_DRV_LOG(WARNING, 469 "fail to set RSS offload types, ret: %d", ret); 470 return; 471 } 472 473 if (valid_rss_hf & ipv4_rss) 474 valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4; 475 476 if (valid_rss_hf & ipv6_rss) 477 valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6; 478 479 if (rss_hf & ~valid_rss_hf) 480 PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64, 481 rss_hf & ~valid_rss_hf); 482 483 vf->rss_hf = valid_rss_hf; 484 } 485 486 static int 487 iavf_init_rss(struct iavf_adapter *adapter) 488 { 489 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); 490 struct rte_eth_rss_conf *rss_conf; 491 uint16_t i, j, nb_q; 492 int ret; 493 494 rss_conf = &adapter->dev_data->dev_conf.rx_adv_conf.rss_conf; 495 nb_q = RTE_MIN(adapter->dev_data->nb_rx_queues, 496 vf->max_rss_qregion); 497 498 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) { 499 PMD_DRV_LOG(DEBUG, "RSS is not supported"); 500 return -ENOTSUP; 501 } 502 503 /* configure RSS key */ 504 if (!rss_conf->rss_key) { 505 /* Calculate the default hash key */ 506 for (i = 0; i < vf->vf_res->rss_key_size; i++) 507 vf->rss_key[i] = (uint8_t)rte_rand(); 508 } else 509 rte_memcpy(vf->rss_key, rss_conf->rss_key, 510 RTE_MIN(rss_conf->rss_key_len, 511 vf->vf_res->rss_key_size)); 512 513 /* init RSS LUT table */ 514 for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) { 515 if (j >= nb_q) 516 j = 0; 517 vf->rss_lut[i] = j; 518 } 519 /* send virtchnl ops to configure RSS */ 520 ret = iavf_configure_rss_lut(adapter); 521 if (ret) 522 return ret; 523 ret = iavf_configure_rss_key(adapter); 524 if (ret) 525 return ret; 526 527 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) { 528 /* Set RSS hash configuration based on rss_conf->rss_hf. */ 529 ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true); 530 if (ret) { 531 PMD_DRV_LOG(ERR, "fail to set default RSS"); 532 return ret; 533 } 534 } else { 535 iavf_config_rss_hf(adapter, rss_conf->rss_hf); 536 } 537 538 return 0; 539 } 540 541 static int 542 iavf_queues_req_reset(struct rte_eth_dev *dev, uint16_t num) 543 { 544 struct iavf_adapter *ad = 545 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 546 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad); 547 int ret; 548 549 ret = iavf_request_queues(dev, num); 550 if (ret) { 551 PMD_DRV_LOG(ERR, "request queues from PF failed"); 552 return ret; 553 } 554 PMD_DRV_LOG(INFO, "change queue pairs from %u to %u", 555 vf->vsi_res->num_queue_pairs, num); 556 557 ret = iavf_dev_reset(dev); 558 if (ret) { 559 PMD_DRV_LOG(ERR, "vf reset failed"); 560 return ret; 561 } 562 563 return 0; 564 } 565 566 static int 567 iavf_dev_vlan_insert_set(struct rte_eth_dev *dev) 568 { 569 struct iavf_adapter *adapter = 570 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 571 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); 572 bool enable; 573 574 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)) 575 return 0; 576 577 enable = !!(dev->data->dev_conf.txmode.offloads & 578 RTE_ETH_TX_OFFLOAD_VLAN_INSERT); 579 iavf_config_vlan_insert_v2(adapter, enable); 580 581 return 0; 582 } 583 584 static int 585 iavf_dev_init_vlan(struct rte_eth_dev *dev) 586 { 587 int err; 588 589 err = iavf_dev_vlan_offload_set(dev, 590 RTE_ETH_VLAN_STRIP_MASK | 591 RTE_ETH_QINQ_STRIP_MASK | 592 RTE_ETH_VLAN_FILTER_MASK | 593 RTE_ETH_VLAN_EXTEND_MASK); 594 if (err) { 595 PMD_DRV_LOG(ERR, "Failed to update vlan offload"); 596 return err; 597 } 598 599 err = iavf_dev_vlan_insert_set(dev); 600 if (err) 601 PMD_DRV_LOG(ERR, "Failed to update vlan 
static int
iavf_dev_configure(struct rte_eth_dev *dev)
{
	struct iavf_adapter *ad =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	uint16_t num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
					   dev->data->nb_tx_queues);
	int ret;

	ad->rx_bulk_alloc_allowed = true;
	/* Initialize to TRUE. If any Rx queue doesn't meet the
	 * vector Rx/Tx preconditions, it will be reset.
	 */
	ad->rx_vec_allowed = true;
	ad->tx_vec_allowed = true;

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	/* Large VF setting */
	if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
		if (!(vf->vf_res->vf_cap_flags &
		      VIRTCHNL_VF_LARGE_NUM_QPAIRS)) {
			PMD_DRV_LOG(ERR, "large VF is not supported");
			return -1;
		}

		if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_LV) {
			PMD_DRV_LOG(ERR, "queue pairs number cannot be larger than %u",
				    IAVF_MAX_NUM_QUEUES_LV);
			return -1;
		}

		ret = iavf_queues_req_reset(dev, num_queue_pairs);
		if (ret)
			return ret;

		ret = iavf_get_max_rss_queue_region(ad);
		if (ret) {
			PMD_INIT_LOG(ERR, "get max rss queue region failed");
			return ret;
		}

		vf->lv_enabled = true;
	} else {
		/* Check if large VF is already enabled. If so, disable it and
		 * release the redundant queue resources. Otherwise, check
		 * whether there are enough queue pairs; if not, request them
		 * from the PF.
		 */
		if (vf->lv_enabled ||
		    num_queue_pairs > vf->vsi_res->num_queue_pairs) {
			ret = iavf_queues_req_reset(dev, num_queue_pairs);
			if (ret)
				return ret;

			vf->lv_enabled = false;
		}
		/* if large VF is not required, use default rss queue region */
		vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT;
	}

	ret = iavf_dev_init_vlan(dev);
	if (ret)
		PMD_DRV_LOG(ERR, "configure VLAN failed: %d", ret);

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		if (iavf_init_rss(ad) != 0) {
			PMD_DRV_LOG(ERR, "configure rss failed");
			return -1;
		}
	}
	return 0;
}

static int
iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
{
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t buf_size, max_pkt_len;
	uint32_t frame_size = dev->data->mtu + IAVF_ETH_OVERHEAD;

	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;

	/* Calculate the maximum packet length allowed */
	max_pkt_len = RTE_MIN((uint32_t)
			rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS,
			frame_size);

	/* Check if maximum packet length is set correctly. */
	if (max_pkt_len <= RTE_ETHER_MIN_LEN ||
	    max_pkt_len > IAVF_FRAME_SIZE_MAX) {
		PMD_DRV_LOG(ERR, "maximum packet length must be "
			    "larger than %u and smaller than %u",
			    (uint32_t)IAVF_ETH_MAX_LEN,
			    (uint32_t)IAVF_FRAME_SIZE_MAX);
		return -EINVAL;
	}

	rxq->max_pkt_len = max_pkt_len;
	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
	    rxq->max_pkt_len > buf_size) {
		dev_data->scattered_rx = 1;
	}
	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	IAVF_WRITE_FLUSH(hw);

	return 0;
}

static int
iavf_init_queues(struct rte_eth_dev *dev)
{
	struct iavf_rx_queue **rxq =
		(struct iavf_rx_queue **)dev->data->rx_queues;
	int i, ret = IAVF_SUCCESS;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (!rxq[i] || !rxq[i]->q_set)
			continue;
		ret = iavf_init_rxq(dev, rxq[i]);
		if (ret != IAVF_SUCCESS)
			break;
	}
	/* set rx/tx function to vector/scatter/single-segment
	 * according to parameters
	 */
	iavf_set_rx_function(dev);
	iavf_set_tx_function(dev);

	return ret;
}
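
/*
 * Map Rx queues to MSI-X vectors. Depending on the configuration this either
 * maps every queue to the single "misc" vector (Rx interrupts disabled or
 * only one vector available), optionally relying on WB_ON_ITR for descriptor
 * write-back, or spreads the queues round-robin over the vectors granted by
 * the PF. The resulting queue/vector map is then sent to the PF, split into
 * several messages for large VFs.
 */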
static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
				      struct rte_intr_handle *intr_handle)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
	struct iavf_qv_map *qv_map;
	uint16_t interval, i;
	int vec;

	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
					    dev->data->nb_rx_queues)) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
				    dev->data->nb_rx_queues);
			return -1;
		}
	}


	qv_map = rte_zmalloc("qv_map",
		dev->data->nb_rx_queues * sizeof(struct iavf_qv_map), 0);
	if (!qv_map) {
		PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
			    dev->data->nb_rx_queues);
		goto qv_map_alloc_err;
	}

	if (!dev->data->dev_conf.intr_conf.rxq ||
	    !rte_intr_dp_is_en(intr_handle)) {
		/* Rx interrupt disabled, Map interrupt only for writeback */
		vf->nb_msix = 1;
		if (vf->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
			/* If WB_ON_ITR is supported, enable it */
			vf->msix_base = IAVF_RX_VEC_START;
			/* Set the ITR for index zero, to 2us to make sure that
			 * we leave time for aggregation to occur, but don't
			 * increase latency dramatically.
			 */
			IAVF_WRITE_REG(hw,
				       IAVF_VFINT_DYN_CTLN1(vf->msix_base - 1),
				       (0 << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
				       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
				       (2UL << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
			/* debug - check for success! the return value
			 * should be 2, offset is 0x2800
			 */
			/* IAVF_READ_REG(hw, IAVF_VFINT_ITRN1(0, 0)); */
		} else {
			/* If no WB_ON_ITR offload flags, need to set
			 * interrupt for descriptor write back.
			 */
			vf->msix_base = IAVF_MISC_VEC_ID;

			/* set ITR to default */
			interval = iavf_calc_itr_interval(
					IAVF_QUEUE_ITR_INTERVAL_DEFAULT);
			IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
				       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       (IAVF_ITR_INDEX_DEFAULT <<
					IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
				       (interval <<
					IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
		}
		IAVF_WRITE_FLUSH(hw);
		/* map all queues to the same interrupt */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			qv_map[i].queue_id = i;
			qv_map[i].vector_id = vf->msix_base;
		}
		vf->qv_map = qv_map;
	} else {
		if (!rte_intr_allow_others(intr_handle)) {
			vf->nb_msix = 1;
			vf->msix_base = IAVF_MISC_VEC_ID;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				qv_map[i].queue_id = i;
				qv_map[i].vector_id = vf->msix_base;
				rte_intr_vec_list_index_set(intr_handle,
							    i, IAVF_MISC_VEC_ID);
			}
			vf->qv_map = qv_map;
			PMD_DRV_LOG(DEBUG,
				    "vector %u are mapping to all Rx queues",
				    vf->msix_base);
		} else {
			/* If Rx interrupts are required and we can use
			 * multiple interrupts, then the vectors start from 1.
			 */
			vf->nb_msix =
				RTE_MIN(rte_intr_nb_efd_get(intr_handle),
					(uint16_t)(vf->vf_res->max_vectors - 1));
			vf->msix_base = IAVF_RX_VEC_START;
			vec = IAVF_RX_VEC_START;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				qv_map[i].queue_id = i;
				qv_map[i].vector_id = vec;
				rte_intr_vec_list_index_set(intr_handle,
							    i, vec++);
				if (vec >= vf->nb_msix + IAVF_RX_VEC_START)
					vec = IAVF_RX_VEC_START;
			}
			vf->qv_map = qv_map;
			PMD_DRV_LOG(DEBUG,
				    "%u vectors are mapping to %u Rx queues",
				    vf->nb_msix, dev->data->nb_rx_queues);
		}
	}

	if (!vf->lv_enabled) {
		if (iavf_config_irq_map(adapter)) {
			PMD_DRV_LOG(ERR, "config interrupt mapping failed");
			goto config_irq_map_err;
		}
	} else {
		uint16_t num_qv_maps = dev->data->nb_rx_queues;
		uint16_t index = 0;

		while (num_qv_maps > IAVF_IRQ_MAP_NUM_PER_BUF) {
			if (iavf_config_irq_map_lv(adapter,
					IAVF_IRQ_MAP_NUM_PER_BUF, index)) {
				PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
				goto config_irq_map_err;
			}
			num_qv_maps -= IAVF_IRQ_MAP_NUM_PER_BUF;
			index += IAVF_IRQ_MAP_NUM_PER_BUF;
		}

		if (iavf_config_irq_map_lv(adapter, num_qv_maps, index)) {
			PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
			goto config_irq_map_err;
		}
	}
	return 0;

config_irq_map_err:
	rte_free(vf->qv_map);
	vf->qv_map = NULL;

qv_map_alloc_err:
	rte_intr_vec_list_free(intr_handle);

	return -1;
}

static int
iavf_start_queues(struct rte_eth_dev *dev)
{
	struct iavf_rx_queue *rxq;
	struct iavf_tx_queue *txq;
	int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq->tx_deferred_start)
			continue;
		if (iavf_dev_tx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
			return -1;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq->rx_deferred_start)
			continue;
		if (iavf_dev_rx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
			return -1;
		}
	}

	return 0;
}

static int
iavf_dev_start(struct rte_eth_dev *dev)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	uint16_t num_queue_pairs;
	uint16_t index = 0;

	PMD_INIT_FUNC_TRACE();

	adapter->stopped = 0;

	vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD;
	vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
				      dev->data->nb_tx_queues);
	num_queue_pairs = vf->num_queue_pairs;

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
		if (iavf_get_qos_cap(adapter)) {
			PMD_INIT_LOG(ERR, "Failed to get qos capability");
			return -1;
		}

	if (iavf_init_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "failed to do Queue init");
		return -1;
	}

	/* If needed, send configure queues msg multiple times to make the
	 * adminq buffer length smaller than the 4K limitation.
	 */
	while (num_queue_pairs > IAVF_CFG_Q_NUM_PER_BUF) {
		if (iavf_configure_queues(adapter,
				IAVF_CFG_Q_NUM_PER_BUF, index) != 0) {
			PMD_DRV_LOG(ERR, "configure queues failed");
			goto err_queue;
		}
		num_queue_pairs -= IAVF_CFG_Q_NUM_PER_BUF;
		index += IAVF_CFG_Q_NUM_PER_BUF;
	}

	if (iavf_configure_queues(adapter, num_queue_pairs, index) != 0) {
		PMD_DRV_LOG(ERR, "configure queues failed");
		goto err_queue;
	}

	if (iavf_config_rx_queues_irqs(dev, intr_handle) != 0) {
		PMD_DRV_LOG(ERR, "configure irq failed");
		goto err_queue;
	}
	/* re-enable the interrupt again, because the efd assignment may have
	 * changed
	 */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			rte_intr_disable(intr_handle);
		rte_intr_enable(intr_handle);
	}

	/* Set all mac addrs */
	iavf_add_del_all_mac_addr(adapter, true);

	/* Set all multicast addresses */
	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
				  true);

	if (iavf_start_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "enable queues failed");
		goto err_mac;
	}

	return 0;

err_mac:
	iavf_add_del_all_mac_addr(adapter, false);
err_queue:
	return -1;
}

static int
iavf_dev_stop(struct rte_eth_dev *dev)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) &&
	    dev->data->dev_conf.intr_conf.rxq != 0)
		rte_intr_disable(intr_handle);

	if (adapter->stopped == 1)
		return 0;

	iavf_stop_queues(dev);

	/* Disable the interrupt for Rx */
	rte_intr_efd_disable(intr_handle);
	/* Rx interrupt vector mapping free */
	rte_intr_vec_list_free(intr_handle);

	/* remove all mac addrs */
	iavf_add_del_all_mac_addr(adapter, false);

	/* remove all multicast addresses */
	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
				  false);

	/* free all iAVF security device context related resources */
	iavf_security_ctx_destroy(adapter);

	adapter->stopped = 1;
	dev->data->dev_started = 0;

	return 0;
}

static int
iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = &adapter->vf;

	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
	dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = IAVF_FRAME_SIZE_MAX;
	dev_info->max_mtu = dev_info->max_rx_pktlen - IAVF_ETH_OVERHEAD;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->hash_key_size = vf->vf_res->rss_key_size;
	dev_info->reta_size = vf->vf_res->rss_lut_size;
	dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
	dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
	dev_info->rx_offload_capa =
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_SCATTER |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_RSS_HASH;

	dev_info->tx_offload_capa =
		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_TSO |
		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;

	if (iavf_ipsec_crypto_supported(adapter)) {
		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
	}

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = IAVF_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = IAVF_DEFAULT_TX_RS_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = IAVF_MAX_RING_DESC,
		.nb_min = IAVF_MIN_RING_DESC,
		.nb_align = IAVF_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = IAVF_MAX_RING_DESC,
		.nb_min = IAVF_MIN_RING_DESC,
		.nb_align = IAVF_ALIGN_RING_DESC,
	};

	return 0;
}

static const uint32_t *
iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};
	return ptypes;
}

int
iavf_dev_link_update(struct rte_eth_dev *dev,
		     __rte_unused int wait_to_complete)
{
	struct rte_eth_link new_link;
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	memset(&new_link, 0, sizeof(new_link));

	/* Only read the link status info stored in the VF; it is updated when
	 * a LINK_CHANGE event is received from the PF over virtchnl.
	 */
	switch (vf->link_speed) {
	case 10:
		new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
		break;
	case 100:
		new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
		break;
	case 1000:
		new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
		break;
	case 10000:
		new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
		break;
	case 20000:
		new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
		break;
	case 25000:
		new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
		break;
	case 40000:
		new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
		break;
	case 50000:
		new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
		break;
	case 100000:
		new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
		break;
	default:
		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		break;
	}

	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	new_link.link_status = vf->link_up ? RTE_ETH_LINK_UP :
					     RTE_ETH_LINK_DOWN;
	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  RTE_ETH_LINK_SPEED_FIXED);

	return rte_eth_linkstatus_set(dev, &new_link);
}

static int
iavf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);

	return iavf_config_promisc(adapter,
				   true, vf->promisc_multicast_enabled);
}

static int
iavf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);

	return iavf_config_promisc(adapter,
				   false, vf->promisc_multicast_enabled);
}

static int
iavf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);

	return iavf_config_promisc(adapter,
				   vf->promisc_unicast_enabled, true);
}

static int
iavf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);

	return iavf_config_promisc(adapter,
				   vf->promisc_unicast_enabled, false);
}

static int
iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
		      __rte_unused uint32_t index,
		      __rte_unused uint32_t pool)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	int err;

	if (rte_is_zero_ether_addr(addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
		return -EINVAL;
	}

	err = iavf_add_del_eth_addr(adapter, addr, true, VIRTCHNL_ETHER_ADDR_EXTRA);
	if (err) {
		PMD_DRV_LOG(ERR, "fail to add MAC address");
		return -EIO;
	}

	vf->mac_num++;

	return 0;
}

static void
iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	struct rte_ether_addr *addr;
	int err;

	addr = &dev->data->mac_addrs[index];

	err = iavf_add_del_eth_addr(adapter, addr, false, VIRTCHNL_ETHER_ADDR_EXTRA);
	if (err)
		PMD_DRV_LOG(ERR, "fail to delete MAC address");

	vf->mac_num--;
}
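
/*
 * VLAN filtering and offloads: when the PF advertises
 * VIRTCHNL_VF_OFFLOAD_VLAN_V2, the V2 opcodes are used (per-VLAN add/delete
 * and explicit stripping/insertion configuration); otherwise the driver
 * falls back to the legacy VIRTCHNL_VF_OFFLOAD_VLAN opcodes.
 */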
static int
iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	int err;

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
		err = iavf_add_del_vlan_v2(adapter, vlan_id, on);
		if (err)
			return -EIO;
		return 0;
	}

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
		return -ENOTSUP;

	err = iavf_add_del_vlan(adapter, vlan_id, on);
	if (err)
		return -EIO;
	return 0;
}

static void
iavf_iterate_vlan_filters_v2(struct rte_eth_dev *dev, bool enable)
{
	struct rte_vlan_filter_conf *vfc = &dev->data->vlan_filter_conf;
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	uint32_t i, j;
	uint64_t ids;

	for (i = 0; i < RTE_DIM(vfc->ids); i++) {
		if (vfc->ids[i] == 0)
			continue;

		ids = vfc->ids[i];
		for (j = 0; ids != 0 && j < 64; j++, ids >>= 1) {
			if (ids & 1)
				iavf_add_del_vlan_v2(adapter,
						     64 * i + j, enable);
		}
	}
}

static int
iavf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	bool enable;
	int err;

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);

		iavf_iterate_vlan_filters_v2(dev, enable);
	}

	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);

		err = iavf_config_vlan_strip_v2(adapter, enable);
		/* If not supported, stripping is already disabled by the PF */
		if (err == -ENOTSUP && !enable)
			err = 0;
		if (err)
			return -EIO;
	}

	return 0;
}

static int
iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	int err;

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
		return iavf_dev_vlan_offload_set_v2(dev, mask);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
		return -ENOTSUP;

	/* Vlan stripping setting */
	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			err = iavf_enable_vlan_strip(adapter);
		else
			err = iavf_disable_vlan_strip(adapter);

		if (err)
			return -EIO;
	}
	return 0;
}

static int
iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	uint8_t *lut;
	uint16_t i, idx, shift;
	int ret;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	if (reta_size != vf->vf_res->rss_lut_size) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			    "(%d) doesn't match the number of hardware can "
			    "support (%d)", reta_size, vf->vf_res->rss_lut_size);
		return -EINVAL;
	}

	lut = rte_zmalloc("rss_lut", reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}
	/* store the old lut table temporarily */
	rte_memcpy(lut, vf->rss_lut, reta_size);

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			lut[i] = reta_conf[idx].reta[shift];
	}

	rte_memcpy(vf->rss_lut, lut, reta_size);
	/* send virtchnl ops to configure RSS */
	ret = iavf_configure_rss_lut(adapter);
	if (ret) /* revert back */
		rte_memcpy(vf->rss_lut, lut, reta_size);
	rte_free(lut);

	return ret;
}

static int
iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	uint16_t i, idx, shift;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	if (reta_size != vf->vf_res->rss_lut_size) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			    "(%d) doesn't match the number of hardware can "
			    "support (%d)", reta_size, vf->vf_res->rss_lut_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = vf->rss_lut[i];
	}

	return 0;
}

static int
iavf_set_rss_key(struct iavf_adapter *adapter, uint8_t *key, uint8_t key_len)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);

	/* HENA setting, it is enabled by default, no change */
	if (!key || key_len == 0) {
		PMD_DRV_LOG(DEBUG, "No key to be configured");
		return 0;
	} else if (key_len != vf->vf_res->rss_key_size) {
		PMD_DRV_LOG(ERR, "The size of hash key configured "
			    "(%d) doesn't match the size of hardware can "
			    "support (%d)", key_len,
			    vf->vf_res->rss_key_size);
		return -EINVAL;
	}

	rte_memcpy(vf->rss_key, key, key_len);

	return iavf_configure_rss_key(adapter);
}

static int
iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
			 struct rte_eth_rss_conf *rss_conf)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	int ret;

	adapter->dev_data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	/* Set hash key. */
	ret = iavf_set_rss_key(adapter, rss_conf->rss_key,
			       rss_conf->rss_key_len);
	if (ret)
		return ret;

	if (rss_conf->rss_hf == 0) {
		vf->rss_hf = 0;
		ret = iavf_set_hena(adapter, 0);

		/* It is a workaround, temporarily allow error to be returned
		 * due to possible lack of PF handling for hena = 0.
		 */
		if (ret)
			PMD_DRV_LOG(WARNING, "fail to clean existing RSS, lack PF support");
		return 0;
	}

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
		/* Clear existing RSS. */
		ret = iavf_set_hena(adapter, 0);

		/* It is a workaround, temporarily allow error to be returned
		 * due to possible lack of PF handling for hena = 0.
		 */
		if (ret)
			PMD_DRV_LOG(WARNING, "fail to clean existing RSS,"
				    " lack PF support");

		/* Set new RSS configuration. */
		ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to set new RSS");
			return ret;
		}
	} else {
		iavf_config_rss_hf(adapter, rss_conf->rss_hf);
	}

	return 0;
}

static int
iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			   struct rte_eth_rss_conf *rss_conf)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	rss_conf->rss_hf = vf->rss_hf;

	if (!rss_conf->rss_key)
		return 0;

	rss_conf->rss_key_len = vf->vf_res->rss_key_size;
	rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len);

	return 0;
}

static int
iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
{
	/* mtu setting is forbidden if the port is started */
	if (dev->data->dev_started) {
		PMD_DRV_LOG(ERR, "port must be stopped before configuration");
		return -EBUSY;
	}

	return 0;
}

static int
iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
			      struct rte_ether_addr *mac_addr)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
	struct rte_ether_addr *old_addr;
	int ret;

	old_addr = (struct rte_ether_addr *)hw->mac.addr;

	if (rte_is_same_ether_addr(old_addr, mac_addr))
		return 0;

	ret = iavf_add_del_eth_addr(adapter, old_addr, false, VIRTCHNL_ETHER_ADDR_PRIMARY);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
			    RTE_ETHER_ADDR_PRT_FMT,
			    RTE_ETHER_ADDR_BYTES(old_addr));

	ret = iavf_add_del_eth_addr(adapter, mac_addr, true, VIRTCHNL_ETHER_ADDR_PRIMARY);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to add new MAC:"
			    RTE_ETHER_ADDR_PRT_FMT,
			    RTE_ETHER_ADDR_BYTES(mac_addr));

	if (ret)
		return -EIO;

	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)hw->mac.addr);
	return 0;
}
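
/*
 * Statistics reported by the PF are free-running counters. The helpers below
 * convert the current counter value into a value relative to the stored
 * offset, handling a single wrap-around of the 48-bit or 32-bit counter
 * width respectively.
 */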
static void
iavf_stat_update_48(uint64_t *offset, uint64_t *stat)
{
	if (*stat >= *offset)
		*stat = *stat - *offset;
	else
		*stat = (uint64_t)((*stat +
			((uint64_t)1 << IAVF_48_BIT_WIDTH)) - *offset);

	*stat &= IAVF_48_BIT_MASK;
}

static void
iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
{
	if (*stat >= *offset)
		*stat = (uint64_t)(*stat - *offset);
	else
		*stat = (uint64_t)((*stat +
			((uint64_t)1 << IAVF_32_BIT_WIDTH)) - *offset);
}

static void
iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
{
	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats;

	iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
	iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
	iavf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
	iavf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
	iavf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
	iavf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
	iavf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
	iavf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
	iavf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
	iavf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
	iavf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
}

static int
iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct iavf_vsi *vsi = &vf->vsi;
	struct virtchnl_eth_stats *pstats = NULL;
	int ret;

	ret = iavf_query_stats(adapter, &pstats);
	if (ret == 0) {
		uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
					 RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? 0 :
					 RTE_ETHER_CRC_LEN;
		iavf_update_stats(vsi, pstats);
		stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
				pstats->rx_broadcast - pstats->rx_discards;
		stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
						pstats->tx_unicast;
		stats->imissed = pstats->rx_discards;
		stats->oerrors = pstats->tx_errors + pstats->tx_discards;
		stats->ibytes = pstats->rx_bytes;
		stats->ibytes -= stats->ipackets * crc_stats_len;
		stats->obytes = pstats->tx_bytes;
	} else {
		PMD_DRV_LOG(ERR, "Get statistics failed");
	}
	return ret;
}

static int
iavf_dev_stats_reset(struct rte_eth_dev *dev)
{
	int ret;
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct iavf_vsi *vsi = &vf->vsi;
	struct virtchnl_eth_stats *pstats = NULL;

	/* read stat values to clear hardware registers */
	ret = iavf_query_stats(adapter, &pstats);
	if (ret != 0)
		return ret;

	/* set stats offset based on current values */
	vsi->eth_stats_offset.eth_stats = *pstats;

	return 0;
}

static int
iavf_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	iavf_dev_stats_reset(dev);
	memset(&vf->vsi.eth_stats_offset.ips_stats, 0,
	       sizeof(struct iavf_ipsec_crypto_stats));
	return 0;
}

static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
				     struct rte_eth_xstat_name *xstats_names,
				     __rte_unused unsigned int limit)
{
	unsigned int i;

	if (xstats_names != NULL)
		for (i = 0; i < IAVF_NB_XSTATS; i++) {
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "%s", rte_iavf_stats_strings[i].name);
		}
	return IAVF_NB_XSTATS;
}

static void
iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
		struct iavf_ipsec_crypto_stats *ips)
{
	uint16_t idx;
	for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) {
		struct iavf_rx_queue *rxq;
		struct iavf_ipsec_crypto_stats *stats;
		rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx];
		stats = &rxq->stats.ipsec_crypto;
		ips->icount += stats->icount;
		ips->ibytes += stats->ibytes;
		ips->ierrors.count += stats->ierrors.count;
		ips->ierrors.sad_miss += stats->ierrors.sad_miss;
		ips->ierrors.not_processed += stats->ierrors.not_processed;
		ips->ierrors.icv_check += stats->ierrors.icv_check;
		ips->ierrors.ipsec_length += stats->ierrors.ipsec_length;
		ips->ierrors.misc += stats->ierrors.misc;
	}
}

static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
			       struct rte_eth_xstat *xstats, unsigned int n)
{
	int ret;
	unsigned int i;
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct iavf_vsi *vsi = &vf->vsi;
	struct virtchnl_eth_stats *pstats = NULL;
	struct iavf_eth_xstats iavf_xtats = {{0}};

	if (n < IAVF_NB_XSTATS)
		return IAVF_NB_XSTATS;

	ret = iavf_query_stats(adapter, &pstats);
	if (ret != 0)
		return 0;

	if (!xstats)
		return 0;

	iavf_update_stats(vsi, pstats);
	iavf_xtats.eth_stats = *pstats;

	if (iavf_ipsec_crypto_supported(adapter))
		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);

	/* loop over xstats array and values from pstats */
	for (i = 0; i < IAVF_NB_XSTATS; i++) {
		xstats[i].id = i;
		xstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) +
			rte_iavf_stats_strings[i].offset);
	}

	return IAVF_NB_XSTATS;
}


static int
iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	uint16_t msix_intr;

	msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
						queue_id);
	if (msix_intr == IAVF_MISC_VEC_ID) {
		PMD_DRV_LOG(INFO, "MISC is also enabled for control");
		IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
			       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
			       IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
			       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	} else {
		IAVF_WRITE_REG(hw,
			       IAVF_VFINT_DYN_CTLN1
				(msix_intr - IAVF_RX_VEC_START),
			       IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
			       IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
			       IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
	}

	IAVF_WRITE_FLUSH(hw);

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		rte_intr_ack(pci_dev->intr_handle);

	return 0;
}

static int
iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t msix_intr;

	msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
						queue_id);
	if (msix_intr == IAVF_MISC_VEC_ID) {
		PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
		return -EIO;
	}

	IAVF_WRITE_REG(hw,
		       IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
		       0);

	IAVF_WRITE_FLUSH(hw);
	return 0;
}

static int
iavf_check_vf_reset_done(struct iavf_hw *hw)
{
	int i, reset;

	for (i = 0; i < IAVF_RESET_WAIT_CNT; i++) {
		reset = IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
			IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;
		if (reset == VIRTCHNL_VFR_VFACTIVE ||
		    reset == VIRTCHNL_VFR_COMPLETED)
			break;
		rte_delay_ms(20);
	}

	if (i >= IAVF_RESET_WAIT_CNT)
		return -1;

	return 0;
}
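
/*
 * Helpers for parsing the "proto_xtr" devargs, which selects the protocol
 * metadata extracted into the mbuf dynamic field for the given Rx queues.
 * Supported type names are listed in xtr_type_map below; queues are given as
 * a single index, a range, or a '(...)' group followed by ':' and the type
 * name (for example "(0,2-4):ipv6" - an illustrative value, not taken from
 * this file).
 */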
IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) & 1849 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 1850 reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT; 1851 if (reset == VIRTCHNL_VFR_VFACTIVE || 1852 reset == VIRTCHNL_VFR_COMPLETED) 1853 break; 1854 rte_delay_ms(20); 1855 } 1856 1857 if (i >= IAVF_RESET_WAIT_CNT) 1858 return -1; 1859 1860 return 0; 1861 } 1862 1863 static int 1864 iavf_lookup_proto_xtr_type(const char *flex_name) 1865 { 1866 static struct { 1867 const char *name; 1868 enum iavf_proto_xtr_type type; 1869 } xtr_type_map[] = { 1870 { "vlan", IAVF_PROTO_XTR_VLAN }, 1871 { "ipv4", IAVF_PROTO_XTR_IPV4 }, 1872 { "ipv6", IAVF_PROTO_XTR_IPV6 }, 1873 { "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW }, 1874 { "tcp", IAVF_PROTO_XTR_TCP }, 1875 { "ip_offset", IAVF_PROTO_XTR_IP_OFFSET }, 1876 { "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID }, 1877 }; 1878 uint32_t i; 1879 1880 for (i = 0; i < RTE_DIM(xtr_type_map); i++) { 1881 if (strcmp(flex_name, xtr_type_map[i].name) == 0) 1882 return xtr_type_map[i].type; 1883 } 1884 1885 PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: " 1886 "vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said"); 1887 1888 return -1; 1889 } 1890 1891 /** 1892 * Parse elem, the elem could be single number/range or '(' ')' group 1893 * 1) A single number elem, it's just a simple digit. e.g. 9 1894 * 2) A single range elem, two digits with a '-' between. e.g. 2-6 1895 * 3) A group elem, combines multiple 1) or 2) with '( )'. e.g (0,2-4,6) 1896 * Within group elem, '-' used for a range separator; 1897 * ',' used for a single number. 1898 */ 1899 static int 1900 iavf_parse_queue_set(const char *input, int xtr_type, 1901 struct iavf_devargs *devargs) 1902 { 1903 const char *str = input; 1904 char *end = NULL; 1905 uint32_t min, max; 1906 uint32_t idx; 1907 1908 while (isblank(*str)) 1909 str++; 1910 1911 if (!isdigit(*str) && *str != '(') 1912 return -1; 1913 1914 /* process single number or single range of number */ 1915 if (*str != '(') { 1916 errno = 0; 1917 idx = strtoul(str, &end, 10); 1918 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM) 1919 return -1; 1920 1921 while (isblank(*end)) 1922 end++; 1923 1924 min = idx; 1925 max = idx; 1926 1927 /* process single <number>-<number> */ 1928 if (*end == '-') { 1929 end++; 1930 while (isblank(*end)) 1931 end++; 1932 if (!isdigit(*end)) 1933 return -1; 1934 1935 errno = 0; 1936 idx = strtoul(end, &end, 10); 1937 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM) 1938 return -1; 1939 1940 max = idx; 1941 while (isblank(*end)) 1942 end++; 1943 } 1944 1945 if (*end != ':') 1946 return -1; 1947 1948 for (idx = RTE_MIN(min, max); 1949 idx <= RTE_MAX(min, max); idx++) 1950 devargs->proto_xtr[idx] = xtr_type; 1951 1952 return 0; 1953 } 1954 1955 /* process set within bracket */ 1956 str++; 1957 while (isblank(*str)) 1958 str++; 1959 if (*str == '\0') 1960 return -1; 1961 1962 min = IAVF_MAX_QUEUE_NUM; 1963 do { 1964 /* go ahead to the first digit */ 1965 while (isblank(*str)) 1966 str++; 1967 if (!isdigit(*str)) 1968 return -1; 1969 1970 /* get the digit value */ 1971 errno = 0; 1972 idx = strtoul(str, &end, 10); 1973 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM) 1974 return -1; 1975 1976 /* go ahead to separator '-',',' and ')' */ 1977 while (isblank(*end)) 1978 end++; 1979 if (*end == '-') { 1980 if (min == IAVF_MAX_QUEUE_NUM) 1981 min = idx; 1982 else /* avoid continuous '-' */ 1983 return -1; 1984 } else if (*end == ',' || *end == ')') { 1985 max = idx; 1986 if (min == IAVF_MAX_QUEUE_NUM) 1987 min = idx; 1988 1989 for (idx = RTE_MIN(min, 
max); 1990 idx <= RTE_MAX(min, max); idx++) 1991 devargs->proto_xtr[idx] = xtr_type; 1992 1993 min = IAVF_MAX_QUEUE_NUM; 1994 } else { 1995 return -1; 1996 } 1997 1998 str = end + 1; 1999 } while (*end != ')' && *end != '\0'); 2000 2001 return 0; 2002 } 2003 2004 static int 2005 iavf_parse_queue_proto_xtr(const char *queues, struct iavf_devargs *devargs) 2006 { 2007 const char *queue_start; 2008 uint32_t idx; 2009 int xtr_type; 2010 char flex_name[32]; 2011 2012 while (isblank(*queues)) 2013 queues++; 2014 2015 if (*queues != '[') { 2016 xtr_type = iavf_lookup_proto_xtr_type(queues); 2017 if (xtr_type < 0) 2018 return -1; 2019 2020 devargs->proto_xtr_dflt = xtr_type; 2021 2022 return 0; 2023 } 2024 2025 queues++; 2026 do { 2027 while (isblank(*queues)) 2028 queues++; 2029 if (*queues == '\0') 2030 return -1; 2031 2032 queue_start = queues; 2033 2034 /* go across a complete bracket */ 2035 if (*queue_start == '(') { 2036 queues += strcspn(queues, ")"); 2037 if (*queues != ')') 2038 return -1; 2039 } 2040 2041 /* scan the separator ':' */ 2042 queues += strcspn(queues, ":"); 2043 if (*queues++ != ':') 2044 return -1; 2045 while (isblank(*queues)) 2046 queues++; 2047 2048 for (idx = 0; ; idx++) { 2049 if (isblank(queues[idx]) || 2050 queues[idx] == ',' || 2051 queues[idx] == ']' || 2052 queues[idx] == '\0') 2053 break; 2054 2055 if (idx > sizeof(flex_name) - 2) 2056 return -1; 2057 2058 flex_name[idx] = queues[idx]; 2059 } 2060 flex_name[idx] = '\0'; 2061 xtr_type = iavf_lookup_proto_xtr_type(flex_name); 2062 if (xtr_type < 0) 2063 return -1; 2064 2065 queues += idx; 2066 2067 while (isblank(*queues) || *queues == ',' || *queues == ']') 2068 queues++; 2069 2070 if (iavf_parse_queue_set(queue_start, xtr_type, devargs) < 0) 2071 return -1; 2072 } while (*queues != '\0'); 2073 2074 return 0; 2075 } 2076 2077 static int 2078 iavf_handle_proto_xtr_arg(__rte_unused const char *key, const char *value, 2079 void *extra_args) 2080 { 2081 struct iavf_devargs *devargs = extra_args; 2082 2083 if (!value || !extra_args) 2084 return -EINVAL; 2085 2086 if (iavf_parse_queue_proto_xtr(value, devargs) < 0) { 2087 PMD_DRV_LOG(ERR, "the proto_xtr's parameter is wrong : '%s'", 2088 value); 2089 return -1; 2090 } 2091 2092 return 0; 2093 } 2094 2095 static int iavf_parse_devargs(struct rte_eth_dev *dev) 2096 { 2097 struct iavf_adapter *ad = 2098 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 2099 struct rte_devargs *devargs = dev->device->devargs; 2100 struct rte_kvargs *kvlist; 2101 int ret; 2102 2103 if (!devargs) 2104 return 0; 2105 2106 kvlist = rte_kvargs_parse(devargs->args, iavf_valid_args); 2107 if (!kvlist) { 2108 PMD_INIT_LOG(ERR, "invalid kvargs key\n"); 2109 return -EINVAL; 2110 } 2111 2112 ad->devargs.proto_xtr_dflt = IAVF_PROTO_XTR_NONE; 2113 memset(ad->devargs.proto_xtr, IAVF_PROTO_XTR_NONE, 2114 sizeof(ad->devargs.proto_xtr)); 2115 2116 ret = rte_kvargs_process(kvlist, IAVF_PROTO_XTR_ARG, 2117 &iavf_handle_proto_xtr_arg, &ad->devargs); 2118 if (ret) 2119 goto bail; 2120 2121 bail: 2122 rte_kvargs_free(kvlist); 2123 return ret; 2124 } 2125 2126 static void 2127 iavf_init_proto_xtr(struct rte_eth_dev *dev) 2128 { 2129 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); 2130 struct iavf_adapter *ad = 2131 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 2132 const struct iavf_proto_xtr_ol *xtr_ol; 2133 bool proto_xtr_enable = false; 2134 int offset; 2135 uint16_t i; 2136 2137 vf->proto_xtr = rte_zmalloc("vf proto xtr", 2138 vf->vsi_res->num_queue_pairs, 0); 2139 if 
(unlikely(!(vf->proto_xtr))) { 2140 PMD_DRV_LOG(ERR, "no memory for setting up proto_xtr's table"); 2141 return; 2142 } 2143 2144 for (i = 0; i < vf->vsi_res->num_queue_pairs; i++) { 2145 vf->proto_xtr[i] = ad->devargs.proto_xtr[i] != 2146 IAVF_PROTO_XTR_NONE ? 2147 ad->devargs.proto_xtr[i] : 2148 ad->devargs.proto_xtr_dflt; 2149 2150 if (vf->proto_xtr[i] != IAVF_PROTO_XTR_NONE) { 2151 uint8_t type = vf->proto_xtr[i]; 2152 2153 iavf_proto_xtr_params[type].required = true; 2154 proto_xtr_enable = true; 2155 } 2156 } 2157 2158 if (likely(!proto_xtr_enable)) 2159 return; 2160 2161 offset = rte_mbuf_dynfield_register(&iavf_proto_xtr_metadata_param); 2162 if (unlikely(offset == -1)) { 2163 PMD_DRV_LOG(ERR, 2164 "failed to extract protocol metadata, error %d", 2165 -rte_errno); 2166 return; 2167 } 2168 2169 PMD_DRV_LOG(DEBUG, 2170 "proto_xtr metadata offset in mbuf is : %d", 2171 offset); 2172 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = offset; 2173 2174 for (i = 0; i < RTE_DIM(iavf_proto_xtr_params); i++) { 2175 xtr_ol = &iavf_proto_xtr_params[i]; 2176 2177 uint8_t rxdid = iavf_proto_xtr_type_to_rxdid((uint8_t)i); 2178 2179 if (!xtr_ol->required) 2180 continue; 2181 2182 if (!(vf->supported_rxdid & BIT(rxdid))) { 2183 PMD_DRV_LOG(ERR, 2184 "rxdid[%u] is not supported in hardware", 2185 rxdid); 2186 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1; 2187 break; 2188 } 2189 2190 offset = rte_mbuf_dynflag_register(&xtr_ol->param); 2191 if (unlikely(offset == -1)) { 2192 PMD_DRV_LOG(ERR, 2193 "failed to register proto_xtr offload '%s', error %d", 2194 xtr_ol->param.name, -rte_errno); 2195 2196 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1; 2197 break; 2198 } 2199 2200 PMD_DRV_LOG(DEBUG, 2201 "proto_xtr offload '%s' offset in mbuf is : %d", 2202 xtr_ol->param.name, offset); 2203 *xtr_ol->ol_flag = 1ULL << offset; 2204 } 2205 } 2206 2207 static int 2208 iavf_init_vf(struct rte_eth_dev *dev) 2209 { 2210 int err, bufsz; 2211 struct iavf_adapter *adapter = 2212 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 2213 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2214 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); 2215 2216 vf->eth_dev = dev; 2217 2218 err = iavf_parse_devargs(dev); 2219 if (err) { 2220 PMD_INIT_LOG(ERR, "Failed to parse devargs"); 2221 goto err; 2222 } 2223 2224 err = iavf_set_mac_type(hw); 2225 if (err) { 2226 PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err); 2227 goto err; 2228 } 2229 2230 err = iavf_check_vf_reset_done(hw); 2231 if (err) { 2232 PMD_INIT_LOG(ERR, "VF is still resetting"); 2233 goto err; 2234 } 2235 2236 iavf_init_adminq_parameter(hw); 2237 err = iavf_init_adminq(hw); 2238 if (err) { 2239 PMD_INIT_LOG(ERR, "init_adminq failed: %d", err); 2240 goto err; 2241 } 2242 2243 vf->aq_resp = rte_zmalloc("vf_aq_resp", IAVF_AQ_BUF_SZ, 0); 2244 if (!vf->aq_resp) { 2245 PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory"); 2246 goto err_aq; 2247 } 2248 if (iavf_check_api_version(adapter) != 0) { 2249 PMD_INIT_LOG(ERR, "check_api version failed"); 2250 goto err_api; 2251 } 2252 2253 bufsz = sizeof(struct virtchnl_vf_resource) + 2254 (IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource)); 2255 vf->vf_res = rte_zmalloc("vf_res", bufsz, 0); 2256 if (!vf->vf_res) { 2257 PMD_INIT_LOG(ERR, "unable to allocate vf_res memory"); 2258 goto err_api; 2259 } 2260 2261 if (iavf_get_vf_resource(adapter) != 0) { 2262 PMD_INIT_LOG(ERR, "iavf_get_vf_config failed"); 2263 goto err_alloc; 2264 } 2265 /* Allocate memort for RSS info */ 2266 
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 2267 vf->rss_key = rte_zmalloc("rss_key", 2268 vf->vf_res->rss_key_size, 0); 2269 if (!vf->rss_key) { 2270 PMD_INIT_LOG(ERR, "unable to allocate rss_key memory"); 2271 goto err_rss; 2272 } 2273 vf->rss_lut = rte_zmalloc("rss_lut", 2274 vf->vf_res->rss_lut_size, 0); 2275 if (!vf->rss_lut) { 2276 PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory"); 2277 goto err_rss; 2278 } 2279 } 2280 2281 if (vf->vsi_res->num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) 2282 vf->lv_enabled = true; 2283 2284 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) { 2285 if (iavf_get_supported_rxdid(adapter) != 0) { 2286 PMD_INIT_LOG(ERR, "failed to do get supported rxdid"); 2287 goto err_rss; 2288 } 2289 } 2290 2291 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) { 2292 if (iavf_get_vlan_offload_caps_v2(adapter) != 0) { 2293 PMD_INIT_LOG(ERR, "failed to do get VLAN offload v2 capabilities"); 2294 goto err_rss; 2295 } 2296 } 2297 2298 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) { 2299 bufsz = sizeof(struct virtchnl_qos_cap_list) + 2300 IAVF_MAX_TRAFFIC_CLASS * 2301 sizeof(struct virtchnl_qos_cap_elem); 2302 vf->qos_cap = rte_zmalloc("qos_cap", bufsz, 0); 2303 if (!vf->qos_cap) { 2304 PMD_INIT_LOG(ERR, "unable to allocate qos_cap memory"); 2305 goto err_rss; 2306 } 2307 iavf_tm_conf_init(dev); 2308 } 2309 2310 iavf_init_proto_xtr(dev); 2311 2312 return 0; 2313 err_rss: 2314 rte_free(vf->rss_key); 2315 rte_free(vf->rss_lut); 2316 err_alloc: 2317 rte_free(vf->qos_cap); 2318 rte_free(vf->vf_res); 2319 vf->vsi_res = NULL; 2320 err_api: 2321 rte_free(vf->aq_resp); 2322 err_aq: 2323 iavf_shutdown_adminq(hw); 2324 err: 2325 return -1; 2326 } 2327 2328 static void 2329 iavf_uninit_vf(struct rte_eth_dev *dev) 2330 { 2331 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2332 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); 2333 2334 iavf_shutdown_adminq(hw); 2335 2336 rte_free(vf->vf_res); 2337 vf->vsi_res = NULL; 2338 vf->vf_res = NULL; 2339 2340 rte_free(vf->aq_resp); 2341 vf->aq_resp = NULL; 2342 2343 rte_free(vf->qos_cap); 2344 vf->qos_cap = NULL; 2345 2346 rte_free(vf->rss_lut); 2347 vf->rss_lut = NULL; 2348 rte_free(vf->rss_key); 2349 vf->rss_key = NULL; 2350 } 2351 2352 /* Enable default admin queue interrupt setting */ 2353 static inline void 2354 iavf_enable_irq0(struct iavf_hw *hw) 2355 { 2356 /* Enable admin queue interrupt trigger */ 2357 IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1, 2358 IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK); 2359 2360 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01, 2361 IAVF_VFINT_DYN_CTL01_INTENA_MASK | 2362 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK | 2363 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK); 2364 2365 IAVF_WRITE_FLUSH(hw); 2366 } 2367 2368 static inline void 2369 iavf_disable_irq0(struct iavf_hw *hw) 2370 { 2371 /* Disable all interrupt types */ 2372 IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1, 0); 2373 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01, 2374 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK); 2375 IAVF_WRITE_FLUSH(hw); 2376 } 2377 2378 static void 2379 iavf_dev_interrupt_handler(void *param) 2380 { 2381 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 2382 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2383 2384 iavf_disable_irq0(hw); 2385 2386 iavf_handle_virtchnl_msg(dev); 2387 2388 iavf_enable_irq0(hw); 2389 } 2390 2391 void 2392 iavf_dev_alarm_handler(void *param) 2393 { 2394 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 2395 struct iavf_hw *hw = 
IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2396 	uint32_t icr0;
2397 
2398 	iavf_disable_irq0(hw);
2399 
2400 	/* read out interrupt causes */
2401 	icr0 = IAVF_READ_REG(hw, IAVF_VFINT_ICR01);
2402 
2403 	if (icr0 & IAVF_VFINT_ICR01_ADMINQ_MASK) {
2404 		PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
2405 		iavf_handle_virtchnl_msg(dev);
2406 	}
2407 
2408 	iavf_enable_irq0(hw);
2409 
2410 	rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
2411 			  iavf_dev_alarm_handler, dev);
2412 }
2413 
2414 static int
2415 iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
2416 		      const struct rte_flow_ops **ops)
2417 {
2418 	if (!dev)
2419 		return -EINVAL;
2420 
2421 	*ops = &iavf_flow_ops;
2422 	return 0;
2423 }
2424 
2425 static void
2426 iavf_default_rss_disable(struct iavf_adapter *adapter)
2427 {
2428 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
2429 	int ret = 0;
2430 
2431 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2432 		/* Set hena = 0 to ask the PF to clean up all existing RSS. */
2433 		ret = iavf_set_hena(adapter, 0);
2434 		if (ret)
2435 			/* This is a workaround: temporarily allow the error
2436 			 * to be returned, because the PF may not handle
2437 			 * hena = 0.
2438 			 */
2439 			PMD_INIT_LOG(WARNING, "failed to disable default RSS, "
2440 				     "PF may lack support");
2441 	}
2442 }
2443 
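/*
 * Summary of the initialization below: iavf_dev_init() assigns the dev_ops
 * and Rx/Tx burst handlers, returns early for secondary processes, fills the
 * iavf_hw identification fields from the PCI device, runs iavf_init_vf()
 * (devargs parsing, admin queue bring-up and VF resource negotiation), sets
 * the default ptype table, allocates and programs the MAC address array,
 * wires up interrupt delivery (an interrupt callback when WB_ON_ITR is
 * offered, otherwise a periodic EAL alarm), initializes the flow engine and,
 * when supported, the inline IPsec crypto context, then disables the PF's
 * default RSS and starts the device watchdog.
 */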
2444 static int
2445 iavf_dev_init(struct rte_eth_dev *eth_dev)
2446 {
2447 	struct iavf_adapter *adapter =
2448 		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
2449 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
2450 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
2451 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2452 	int ret = 0;
2453 
2454 	PMD_INIT_FUNC_TRACE();
2455 
2456 	/* assign ops func pointer */
2457 	eth_dev->dev_ops = &iavf_eth_dev_ops;
2458 	eth_dev->rx_queue_count = iavf_dev_rxq_count;
2459 	eth_dev->rx_descriptor_status = iavf_dev_rx_desc_status;
2460 	eth_dev->tx_descriptor_status = iavf_dev_tx_desc_status;
2461 	eth_dev->rx_pkt_burst = &iavf_recv_pkts;
2462 	eth_dev->tx_pkt_burst = &iavf_xmit_pkts;
2463 	eth_dev->tx_pkt_prepare = &iavf_prep_pkts;
2464 
2465 	/* For secondary processes, we don't initialize any further as the
2466 	 * primary has already done this work. Only check if we need a
2467 	 * different RX and TX function.
2468 	 */
2469 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2470 		iavf_set_rx_function(eth_dev);
2471 		iavf_set_tx_function(eth_dev);
2472 		return 0;
2473 	}
2474 	rte_eth_copy_pci_info(eth_dev, pci_dev);
2475 
2476 	hw->vendor_id = pci_dev->id.vendor_id;
2477 	hw->device_id = pci_dev->id.device_id;
2478 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2479 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2480 	hw->bus.bus_id = pci_dev->addr.bus;
2481 	hw->bus.device = pci_dev->addr.devid;
2482 	hw->bus.func = pci_dev->addr.function;
2483 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
2484 	hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
2485 	adapter->dev_data = eth_dev->data;
2486 	adapter->stopped = 1;
2487 
2488 	if (iavf_init_vf(eth_dev) != 0) {
2489 		PMD_INIT_LOG(ERR, "Init vf failed");
2490 		return -1;
2491 	}
2492 
2493 	/* set default ptype table */
2494 	iavf_set_default_ptype_table(eth_dev);
2495 
2496 	/* copy mac addr */
2497 	eth_dev->data->mac_addrs = rte_zmalloc(
2498 		"iavf_mac", RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX, 0);
2499 	if (!eth_dev->data->mac_addrs) {
2500 		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
2501 			     " store MAC addresses",
2502 			     RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
2503 		ret = -ENOMEM;
2504 		goto init_vf_err;
2505 	}
2506 	/* If the MAC address is not configured by the host,
2507 	 * generate a random one.
2508 	 */
2509 	if (!rte_is_valid_assigned_ether_addr(
2510 			(struct rte_ether_addr *)hw->mac.addr))
2511 		rte_eth_random_addr(hw->mac.addr);
2512 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
2513 			    &eth_dev->data->mac_addrs[0]);
2514 
2515 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
2516 		/* register callback func to eal lib */
2517 		rte_intr_callback_register(pci_dev->intr_handle,
2518 					   iavf_dev_interrupt_handler,
2519 					   (void *)eth_dev);
2520 
2521 		/* enable uio intr after callback register */
2522 		rte_intr_enable(pci_dev->intr_handle);
2523 	} else {
2524 		rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
2525 				  iavf_dev_alarm_handler, eth_dev);
2526 	}
2527 
2528 	/* configure and enable device interrupt */
2529 	iavf_enable_irq0(hw);
2530 
2531 	ret = iavf_flow_init(adapter);
2532 	if (ret) {
2533 		PMD_INIT_LOG(ERR, "Failed to initialize flow");
2534 		goto flow_init_err;
2535 	}
2536 
2537 	/* Check if the IPsec Crypto offload is supported and create
2538 	 * security_ctx if it is.
2539 	 */
2540 	if (iavf_ipsec_crypto_supported(adapter)) {
2541 		/* Initialize security_ctx only for the primary process */
2542 		ret = iavf_security_ctx_create(adapter);
2543 		if (ret) {
2544 			PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
2545 			return ret;
2546 		}
2547 
2548 		ret = iavf_security_init(adapter);
2549 		if (ret) {
2550 			PMD_INIT_LOG(ERR, "failed to initialize ipsec crypto resources");
2551 			return ret;
2552 		}
2553 	}
2554 
2555 	iavf_default_rss_disable(adapter);
2556 
2557 
2558 	/* Start device watchdog */
2559 	iavf_dev_watchdog_enable(adapter);
2560 
2561 
2562 	return 0;
2563 
2564 flow_init_err:
2565 	rte_free(eth_dev->data->mac_addrs);
2566 	eth_dev->data->mac_addrs = NULL;
2567 
2568 init_vf_err:
2569 	iavf_uninit_vf(eth_dev);
2570 
2571 	return ret;
2572 }
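/*
 * Teardown below mirrors iavf_dev_init() in reverse: stop the port, flush
 * and uninitialize the flow engine, drop the promiscuous workaround setting,
 * shut down the admin queue, unhook interrupt delivery (callback or alarm),
 * release QoS, RSS and VF resources, re-enable bus mastering after a VFLR
 * reset and disable the device watchdog.
 */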
2573 
2574 static int
2575 iavf_dev_close(struct rte_eth_dev *dev)
2576 {
2577 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2578 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2579 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2580 	struct iavf_adapter *adapter =
2581 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2582 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2583 	int ret;
2584 
2585 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2586 		return 0;
2587 
2588 	ret = iavf_dev_stop(dev);
2589 
2590 	iavf_flow_flush(dev, NULL);
2591 	iavf_flow_uninit(adapter);
2592 
2593 	/*
2594 	 * Disable promiscuous mode before resetting the VF. This is a
2595 	 * workaround for use with the kernel PF driver and is not the
2596 	 * normal flow.
2597 	 */
2598 	if (vf->promisc_unicast_enabled || vf->promisc_multicast_enabled)
2599 		iavf_config_promisc(adapter, false, false);
2600 
2601 	iavf_shutdown_adminq(hw);
2602 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
2603 		/* disable uio intr before callback unregister */
2604 		rte_intr_disable(intr_handle);
2605 
2606 		/* unregister callback func from eal lib */
2607 		rte_intr_callback_unregister(intr_handle,
2608 					     iavf_dev_interrupt_handler, dev);
2609 	} else {
2610 		rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
2611 	}
2612 	iavf_disable_irq0(hw);
2613 
2614 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
2615 		iavf_tm_conf_uninit(dev);
2616 
2617 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2618 		if (vf->rss_lut) {
2619 			rte_free(vf->rss_lut);
2620 			vf->rss_lut = NULL;
2621 		}
2622 		if (vf->rss_key) {
2623 			rte_free(vf->rss_key);
2624 			vf->rss_key = NULL;
2625 		}
2626 	}
2627 
2628 	rte_free(vf->vf_res);
2629 	vf->vsi_res = NULL;
2630 	vf->vf_res = NULL;
2631 
2632 	rte_free(vf->aq_resp);
2633 	vf->aq_resp = NULL;
2634 
2635 	/*
2636 	 * If the VF is reset via VFLR, the device will be knocked out of bus
2637 	 * master mode, and the driver will fail to recover from the reset. Fix
2638 	 * this by enabling bus mastering after every reset. In a non-VFLR case,
2639 	 * the bus master bit will not be disabled, and this call will have no
2640 	 * effect.
2641 */ 2642 if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true)) 2643 vf->vf_reset = false; 2644 2645 /* disable watchdog */ 2646 iavf_dev_watchdog_disable(adapter); 2647 2648 return ret; 2649 } 2650 2651 static int 2652 iavf_dev_uninit(struct rte_eth_dev *dev) 2653 { 2654 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2655 return -EPERM; 2656 2657 iavf_dev_close(dev); 2658 2659 return 0; 2660 } 2661 2662 /* 2663 * Reset VF device only to re-initialize resources in PMD layer 2664 */ 2665 static int 2666 iavf_dev_reset(struct rte_eth_dev *dev) 2667 { 2668 int ret; 2669 2670 ret = iavf_dev_uninit(dev); 2671 if (ret) 2672 return ret; 2673 2674 return iavf_dev_init(dev); 2675 } 2676 2677 static int 2678 iavf_dcf_cap_check_handler(__rte_unused const char *key, 2679 const char *value, __rte_unused void *opaque) 2680 { 2681 if (strcmp(value, "dcf")) 2682 return -1; 2683 2684 return 0; 2685 } 2686 2687 static int 2688 iavf_dcf_cap_selected(struct rte_devargs *devargs) 2689 { 2690 struct rte_kvargs *kvlist; 2691 const char *key = "cap"; 2692 int ret = 0; 2693 2694 if (devargs == NULL) 2695 return 0; 2696 2697 kvlist = rte_kvargs_parse(devargs->args, NULL); 2698 if (kvlist == NULL) 2699 return 0; 2700 2701 if (!rte_kvargs_count(kvlist, key)) 2702 goto exit; 2703 2704 /* dcf capability selected when there's a key-value pair: cap=dcf */ 2705 if (rte_kvargs_process(kvlist, key, 2706 iavf_dcf_cap_check_handler, NULL) < 0) 2707 goto exit; 2708 2709 ret = 1; 2710 2711 exit: 2712 rte_kvargs_free(kvlist); 2713 return ret; 2714 } 2715 2716 static int eth_iavf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2717 struct rte_pci_device *pci_dev) 2718 { 2719 if (iavf_dcf_cap_selected(pci_dev->device.devargs)) 2720 return 1; 2721 2722 return rte_eth_dev_pci_generic_probe(pci_dev, 2723 sizeof(struct iavf_adapter), iavf_dev_init); 2724 } 2725 2726 static int eth_iavf_pci_remove(struct rte_pci_device *pci_dev) 2727 { 2728 return rte_eth_dev_pci_generic_remove(pci_dev, iavf_dev_uninit); 2729 } 2730 2731 /* Adaptive virtual function driver struct */ 2732 static struct rte_pci_driver rte_iavf_pmd = { 2733 .id_table = pci_id_iavf_map, 2734 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 2735 .probe = eth_iavf_pci_probe, 2736 .remove = eth_iavf_pci_remove, 2737 }; 2738 2739 RTE_PMD_REGISTER_PCI(net_iavf, rte_iavf_pmd); 2740 RTE_PMD_REGISTER_PCI_TABLE(net_iavf, pci_id_iavf_map); 2741 RTE_PMD_REGISTER_KMOD_DEP(net_iavf, "* igb_uio | vfio-pci"); 2742 RTE_PMD_REGISTER_PARAM_STRING(net_iavf, "cap=dcf"); 2743 RTE_LOG_REGISTER_SUFFIX(iavf_logtype_init, init, NOTICE); 2744 RTE_LOG_REGISTER_SUFFIX(iavf_logtype_driver, driver, NOTICE); 2745 #ifdef RTE_ETHDEV_DEBUG_RX 2746 RTE_LOG_REGISTER_SUFFIX(iavf_logtype_rx, rx, DEBUG); 2747 #endif 2748 #ifdef RTE_ETHDEV_DEBUG_TX 2749 RTE_LOG_REGISTER_SUFFIX(iavf_logtype_tx, tx, DEBUG); 2750 #endif 2751
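/*
 * Illustrative devargs usage for the options handled in this file (the PCI
 * addresses and testpmd invocations are placeholders, not taken from this
 * source):
 *
 *   proto_xtr selects protocol extraction, either one default type for all
 *   queues or per-queue "<queue set>:<type>" entries inside brackets, with
 *   <type> one of vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said:
 *
 *       dpdk-testpmd -a 18:01.0,proto_xtr=vlan ...
 *       dpdk-testpmd -a 18:01.0,proto_xtr='[(1,2-3):tcp,4-7:ip_offset]' ...
 *
 *   cap=dcf makes eth_iavf_pci_probe() return 1 so this PMD skips the device
 *   and a DCF-capable driver can claim it instead:
 *
 *       dpdk-testpmd -a 18:01.0,cap=dcf ...
 */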