1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2018 Intel Corporation 3 */ 4 5 #include <rte_string_fns.h> 6 #include <ethdev_pci.h> 7 8 #include <stdio.h> 9 #include <sys/types.h> 10 #include <sys/stat.h> 11 #include <unistd.h> 12 13 #include <rte_tailq.h> 14 15 #include "eal_firmware.h" 16 17 #include "base/ice_sched.h" 18 #include "base/ice_flow.h" 19 #include "base/ice_dcb.h" 20 #include "base/ice_common.h" 21 22 #include "rte_pmd_ice.h" 23 #include "ice_ethdev.h" 24 #include "ice_rxtx.h" 25 #include "ice_generic_flow.h" 26 27 /* devargs */ 28 #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support" 29 #define ICE_PIPELINE_MODE_SUPPORT_ARG "pipeline-mode-support" 30 #define ICE_PROTO_XTR_ARG "proto_xtr" 31 #define ICE_HW_DEBUG_MASK_ARG "hw_debug_mask" 32 #define ICE_ONE_PPS_OUT_ARG "pps_out" 33 34 static const char * const ice_valid_args[] = { 35 ICE_SAFE_MODE_SUPPORT_ARG, 36 ICE_PIPELINE_MODE_SUPPORT_ARG, 37 ICE_PROTO_XTR_ARG, 38 ICE_HW_DEBUG_MASK_ARG, 39 ICE_ONE_PPS_OUT_ARG, 40 NULL 41 }; 42 43 #define NSEC_PER_SEC 1000000000 44 #define PPS_OUT_DELAY_NS 1 45 46 static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = { 47 .name = "intel_pmd_dynfield_proto_xtr_metadata", 48 .size = sizeof(uint32_t), 49 .align = __alignof__(uint32_t), 50 .flags = 0, 51 }; 52 53 struct proto_xtr_ol_flag { 54 const struct rte_mbuf_dynflag param; 55 uint64_t *ol_flag; 56 bool required; 57 }; 58 59 static bool ice_proto_xtr_hw_support[PROTO_XTR_MAX]; 60 61 static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = { 62 [PROTO_XTR_VLAN] = { 63 .param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" }, 64 .ol_flag = &rte_net_ice_dynflag_proto_xtr_vlan_mask }, 65 [PROTO_XTR_IPV4] = { 66 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" }, 67 .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv4_mask }, 68 [PROTO_XTR_IPV6] = { 69 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" }, 70 .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_mask }, 71 [PROTO_XTR_IPV6_FLOW] = { 72 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" }, 73 .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask }, 74 [PROTO_XTR_TCP] = { 75 .param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" }, 76 .ol_flag = &rte_net_ice_dynflag_proto_xtr_tcp_mask }, 77 [PROTO_XTR_IP_OFFSET] = { 78 .param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" }, 79 .ol_flag = &rte_net_ice_dynflag_proto_xtr_ip_offset_mask }, 80 }; 81 82 #define ICE_OS_DEFAULT_PKG_NAME "ICE OS Default Package" 83 #define ICE_COMMS_PKG_NAME "ICE COMMS Package" 84 #define ICE_MAX_RES_DESC_NUM 1024 85 86 static int ice_dev_configure(struct rte_eth_dev *dev); 87 static int ice_dev_start(struct rte_eth_dev *dev); 88 static int ice_dev_stop(struct rte_eth_dev *dev); 89 static int ice_dev_close(struct rte_eth_dev *dev); 90 static int ice_dev_reset(struct rte_eth_dev *dev); 91 static int ice_dev_info_get(struct rte_eth_dev *dev, 92 struct rte_eth_dev_info *dev_info); 93 static int ice_link_update(struct rte_eth_dev *dev, 94 int wait_to_complete); 95 static int ice_dev_set_link_up(struct rte_eth_dev *dev); 96 static int ice_dev_set_link_down(struct rte_eth_dev *dev); 97 98 static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 99 static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask); 100 static int ice_rss_reta_update(struct rte_eth_dev *dev, 101 struct rte_eth_rss_reta_entry64 *reta_conf, 102 uint16_t reta_size); 103 static int ice_rss_reta_query(struct rte_eth_dev *dev, 104 struct rte_eth_rss_reta_entry64 *reta_conf, 105 uint16_t 
reta_size); 106 static int ice_rss_hash_update(struct rte_eth_dev *dev, 107 struct rte_eth_rss_conf *rss_conf); 108 static int ice_rss_hash_conf_get(struct rte_eth_dev *dev, 109 struct rte_eth_rss_conf *rss_conf); 110 static int ice_promisc_enable(struct rte_eth_dev *dev); 111 static int ice_promisc_disable(struct rte_eth_dev *dev); 112 static int ice_allmulti_enable(struct rte_eth_dev *dev); 113 static int ice_allmulti_disable(struct rte_eth_dev *dev); 114 static int ice_vlan_filter_set(struct rte_eth_dev *dev, 115 uint16_t vlan_id, 116 int on); 117 static int ice_macaddr_set(struct rte_eth_dev *dev, 118 struct rte_ether_addr *mac_addr); 119 static int ice_macaddr_add(struct rte_eth_dev *dev, 120 struct rte_ether_addr *mac_addr, 121 __rte_unused uint32_t index, 122 uint32_t pool); 123 static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index); 124 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev, 125 uint16_t queue_id); 126 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev, 127 uint16_t queue_id); 128 static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, 129 size_t fw_size); 130 static int ice_vlan_pvid_set(struct rte_eth_dev *dev, 131 uint16_t pvid, int on); 132 static int ice_get_eeprom_length(struct rte_eth_dev *dev); 133 static int ice_get_eeprom(struct rte_eth_dev *dev, 134 struct rte_dev_eeprom_info *eeprom); 135 static int ice_stats_get(struct rte_eth_dev *dev, 136 struct rte_eth_stats *stats); 137 static int ice_stats_reset(struct rte_eth_dev *dev); 138 static int ice_xstats_get(struct rte_eth_dev *dev, 139 struct rte_eth_xstat *xstats, unsigned int n); 140 static int ice_xstats_get_names(struct rte_eth_dev *dev, 141 struct rte_eth_xstat_name *xstats_names, 142 unsigned int limit); 143 static int ice_dev_flow_ops_get(struct rte_eth_dev *dev, 144 const struct rte_flow_ops **ops); 145 static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 146 struct rte_eth_udp_tunnel *udp_tunnel); 147 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 148 struct rte_eth_udp_tunnel *udp_tunnel); 149 150 static const struct rte_pci_id pci_id_ice_map[] = { 151 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) }, 152 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP) }, 153 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T) }, 154 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE) }, 155 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP) }, 156 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) }, 157 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) }, 158 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) }, 159 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) }, 160 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) }, 161 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) }, 162 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_BACKPLANE) }, 163 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_QSFP) }, 164 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SFP) }, 165 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_10G_BASE_T) }, 166 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SGMII) }, 167 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE) }, 168 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP) }, 169 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) }, 170 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, 
ICE_DEV_ID_E822C_10G_BASE_T) }, 171 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII) }, 172 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE) }, 173 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP) }, 174 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T) }, 175 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII) }, 176 { .vendor_id = 0, /* sentinel */ }, 177 }; 178 179 static const struct eth_dev_ops ice_eth_dev_ops = { 180 .dev_configure = ice_dev_configure, 181 .dev_start = ice_dev_start, 182 .dev_stop = ice_dev_stop, 183 .dev_close = ice_dev_close, 184 .dev_reset = ice_dev_reset, 185 .dev_set_link_up = ice_dev_set_link_up, 186 .dev_set_link_down = ice_dev_set_link_down, 187 .rx_queue_start = ice_rx_queue_start, 188 .rx_queue_stop = ice_rx_queue_stop, 189 .tx_queue_start = ice_tx_queue_start, 190 .tx_queue_stop = ice_tx_queue_stop, 191 .rx_queue_setup = ice_rx_queue_setup, 192 .rx_queue_release = ice_rx_queue_release, 193 .tx_queue_setup = ice_tx_queue_setup, 194 .tx_queue_release = ice_tx_queue_release, 195 .dev_infos_get = ice_dev_info_get, 196 .dev_supported_ptypes_get = ice_dev_supported_ptypes_get, 197 .link_update = ice_link_update, 198 .mtu_set = ice_mtu_set, 199 .mac_addr_set = ice_macaddr_set, 200 .mac_addr_add = ice_macaddr_add, 201 .mac_addr_remove = ice_macaddr_remove, 202 .vlan_filter_set = ice_vlan_filter_set, 203 .vlan_offload_set = ice_vlan_offload_set, 204 .reta_update = ice_rss_reta_update, 205 .reta_query = ice_rss_reta_query, 206 .rss_hash_update = ice_rss_hash_update, 207 .rss_hash_conf_get = ice_rss_hash_conf_get, 208 .promiscuous_enable = ice_promisc_enable, 209 .promiscuous_disable = ice_promisc_disable, 210 .allmulticast_enable = ice_allmulti_enable, 211 .allmulticast_disable = ice_allmulti_disable, 212 .rx_queue_intr_enable = ice_rx_queue_intr_enable, 213 .rx_queue_intr_disable = ice_rx_queue_intr_disable, 214 .fw_version_get = ice_fw_version_get, 215 .vlan_pvid_set = ice_vlan_pvid_set, 216 .rxq_info_get = ice_rxq_info_get, 217 .txq_info_get = ice_txq_info_get, 218 .rx_burst_mode_get = ice_rx_burst_mode_get, 219 .tx_burst_mode_get = ice_tx_burst_mode_get, 220 .get_eeprom_length = ice_get_eeprom_length, 221 .get_eeprom = ice_get_eeprom, 222 .stats_get = ice_stats_get, 223 .stats_reset = ice_stats_reset, 224 .xstats_get = ice_xstats_get, 225 .xstats_get_names = ice_xstats_get_names, 226 .xstats_reset = ice_stats_reset, 227 .flow_ops_get = ice_dev_flow_ops_get, 228 .udp_tunnel_port_add = ice_dev_udp_tunnel_port_add, 229 .udp_tunnel_port_del = ice_dev_udp_tunnel_port_del, 230 .tx_done_cleanup = ice_tx_done_cleanup, 231 .get_monitor_addr = ice_get_monitor_addr, 232 }; 233 234 /* store statistics names and its offset in stats structure */ 235 struct ice_xstats_name_off { 236 char name[RTE_ETH_XSTATS_NAME_SIZE]; 237 unsigned int offset; 238 }; 239 240 static const struct ice_xstats_name_off ice_stats_strings[] = { 241 {"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)}, 242 {"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)}, 243 {"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)}, 244 {"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)}, 245 {"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats, 246 rx_unknown_protocol)}, 247 {"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)}, 248 {"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)}, 249 {"tx_broadcast_packets", offsetof(struct ice_eth_stats, 
tx_broadcast)}, 250 {"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)}, 251 }; 252 253 #define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \ 254 sizeof(ice_stats_strings[0])) 255 256 static const struct ice_xstats_name_off ice_hw_port_strings[] = { 257 {"tx_link_down_dropped", offsetof(struct ice_hw_port_stats, 258 tx_dropped_link_down)}, 259 {"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)}, 260 {"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats, 261 illegal_bytes)}, 262 {"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)}, 263 {"mac_local_errors", offsetof(struct ice_hw_port_stats, 264 mac_local_faults)}, 265 {"mac_remote_errors", offsetof(struct ice_hw_port_stats, 266 mac_remote_faults)}, 267 {"rx_len_errors", offsetof(struct ice_hw_port_stats, 268 rx_len_errors)}, 269 {"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)}, 270 {"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)}, 271 {"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)}, 272 {"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)}, 273 {"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)}, 274 {"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats, 275 rx_size_127)}, 276 {"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats, 277 rx_size_255)}, 278 {"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats, 279 rx_size_511)}, 280 {"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats, 281 rx_size_1023)}, 282 {"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats, 283 rx_size_1522)}, 284 {"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats, 285 rx_size_big)}, 286 {"rx_undersized_errors", offsetof(struct ice_hw_port_stats, 287 rx_undersize)}, 288 {"rx_oversize_errors", offsetof(struct ice_hw_port_stats, 289 rx_oversize)}, 290 {"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats, 291 mac_short_pkt_dropped)}, 292 {"rx_fragmented_errors", offsetof(struct ice_hw_port_stats, 293 rx_fragments)}, 294 {"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)}, 295 {"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)}, 296 {"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats, 297 tx_size_127)}, 298 {"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats, 299 tx_size_255)}, 300 {"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats, 301 tx_size_511)}, 302 {"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats, 303 tx_size_1023)}, 304 {"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats, 305 tx_size_1522)}, 306 {"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats, 307 tx_size_big)}, 308 }; 309 310 #define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \ 311 sizeof(ice_hw_port_strings[0])) 312 313 static void 314 ice_init_controlq_parameter(struct ice_hw *hw) 315 { 316 /* fields for adminq */ 317 hw->adminq.num_rq_entries = ICE_ADMINQ_LEN; 318 hw->adminq.num_sq_entries = ICE_ADMINQ_LEN; 319 hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ; 320 hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ; 321 322 /* fields for mailboxq, DPDK used as PF host */ 323 hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN; 324 hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN; 325 hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ; 326 hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ; 327 } 328 329 static int 330 lookup_proto_xtr_type(const char *xtr_name) 331 { 332 static 
struct { 333 const char *name; 334 enum proto_xtr_type type; 335 } xtr_type_map[] = { 336 { "vlan", PROTO_XTR_VLAN }, 337 { "ipv4", PROTO_XTR_IPV4 }, 338 { "ipv6", PROTO_XTR_IPV6 }, 339 { "ipv6_flow", PROTO_XTR_IPV6_FLOW }, 340 { "tcp", PROTO_XTR_TCP }, 341 { "ip_offset", PROTO_XTR_IP_OFFSET }, 342 }; 343 uint32_t i; 344 345 for (i = 0; i < RTE_DIM(xtr_type_map); i++) { 346 if (strcmp(xtr_name, xtr_type_map[i].name) == 0) 347 return xtr_type_map[i].type; 348 } 349 350 return -1; 351 } 352 353 /* 354 * Parse elem, the elem could be single number/range or '(' ')' group 355 * 1) A single number elem, it's just a simple digit. e.g. 9 356 * 2) A single range elem, two digits with a '-' between. e.g. 2-6 357 * 3) A group elem, combines multiple 1) or 2) with '( )'. e.g (0,2-4,6) 358 * Within group elem, '-' used for a range separator; 359 * ',' used for a single number. 360 */ 361 static int 362 parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs) 363 { 364 const char *str = input; 365 char *end = NULL; 366 uint32_t min, max; 367 uint32_t idx; 368 369 while (isblank(*str)) 370 str++; 371 372 if (!isdigit(*str) && *str != '(') 373 return -1; 374 375 /* process single number or single range of number */ 376 if (*str != '(') { 377 errno = 0; 378 idx = strtoul(str, &end, 10); 379 if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM) 380 return -1; 381 382 while (isblank(*end)) 383 end++; 384 385 min = idx; 386 max = idx; 387 388 /* process single <number>-<number> */ 389 if (*end == '-') { 390 end++; 391 while (isblank(*end)) 392 end++; 393 if (!isdigit(*end)) 394 return -1; 395 396 errno = 0; 397 idx = strtoul(end, &end, 10); 398 if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM) 399 return -1; 400 401 max = idx; 402 while (isblank(*end)) 403 end++; 404 } 405 406 if (*end != ':') 407 return -1; 408 409 for (idx = RTE_MIN(min, max); 410 idx <= RTE_MAX(min, max); idx++) 411 devargs->proto_xtr[idx] = xtr_type; 412 413 return 0; 414 } 415 416 /* process set within bracket */ 417 str++; 418 while (isblank(*str)) 419 str++; 420 if (*str == '\0') 421 return -1; 422 423 min = ICE_MAX_QUEUE_NUM; 424 do { 425 /* go ahead to the first digit */ 426 while (isblank(*str)) 427 str++; 428 if (!isdigit(*str)) 429 return -1; 430 431 /* get the digit value */ 432 errno = 0; 433 idx = strtoul(str, &end, 10); 434 if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM) 435 return -1; 436 437 /* go ahead to separator '-',',' and ')' */ 438 while (isblank(*end)) 439 end++; 440 if (*end == '-') { 441 if (min == ICE_MAX_QUEUE_NUM) 442 min = idx; 443 else /* avoid continuous '-' */ 444 return -1; 445 } else if (*end == ',' || *end == ')') { 446 max = idx; 447 if (min == ICE_MAX_QUEUE_NUM) 448 min = idx; 449 450 for (idx = RTE_MIN(min, max); 451 idx <= RTE_MAX(min, max); idx++) 452 devargs->proto_xtr[idx] = xtr_type; 453 454 min = ICE_MAX_QUEUE_NUM; 455 } else { 456 return -1; 457 } 458 459 str = end + 1; 460 } while (*end != ')' && *end != '\0'); 461 462 return 0; 463 } 464 465 static int 466 parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs) 467 { 468 const char *queue_start; 469 uint32_t idx; 470 int xtr_type; 471 char xtr_name[32]; 472 473 while (isblank(*queues)) 474 queues++; 475 476 if (*queues != '[') { 477 xtr_type = lookup_proto_xtr_type(queues); 478 if (xtr_type < 0) 479 return -1; 480 481 devargs->proto_xtr_dflt = xtr_type; 482 483 return 0; 484 } 485 486 queues++; 487 do { 488 while (isblank(*queues)) 489 queues++; 490 if (*queues == '\0') 491 return -1; 492 493 
queue_start = queues; 494 495 /* go across a complete bracket */ 496 if (*queue_start == '(') { 497 queues += strcspn(queues, ")"); 498 if (*queues != ')') 499 return -1; 500 } 501 502 /* scan the separator ':' */ 503 queues += strcspn(queues, ":"); 504 if (*queues++ != ':') 505 return -1; 506 while (isblank(*queues)) 507 queues++; 508 509 for (idx = 0; ; idx++) { 510 if (isblank(queues[idx]) || 511 queues[idx] == ',' || 512 queues[idx] == ']' || 513 queues[idx] == '\0') 514 break; 515 516 if (idx > sizeof(xtr_name) - 2) 517 return -1; 518 519 xtr_name[idx] = queues[idx]; 520 } 521 xtr_name[idx] = '\0'; 522 xtr_type = lookup_proto_xtr_type(xtr_name); 523 if (xtr_type < 0) 524 return -1; 525 526 queues += idx; 527 528 while (isblank(*queues) || *queues == ',' || *queues == ']') 529 queues++; 530 531 if (parse_queue_set(queue_start, xtr_type, devargs) < 0) 532 return -1; 533 } while (*queues != '\0'); 534 535 return 0; 536 } 537 538 static int 539 handle_proto_xtr_arg(__rte_unused const char *key, const char *value, 540 void *extra_args) 541 { 542 struct ice_devargs *devargs = extra_args; 543 544 if (value == NULL || extra_args == NULL) 545 return -EINVAL; 546 547 if (parse_queue_proto_xtr(value, devargs) < 0) { 548 PMD_DRV_LOG(ERR, 549 "The protocol extraction parameter is wrong : '%s'", 550 value); 551 return -1; 552 } 553 554 return 0; 555 } 556 557 static void 558 ice_check_proto_xtr_support(struct ice_hw *hw) 559 { 560 #define FLX_REG(val, fld, idx) \ 561 (((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \ 562 GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S) 563 static struct { 564 uint32_t rxdid; 565 uint8_t opcode; 566 uint8_t protid_0; 567 uint8_t protid_1; 568 } xtr_sets[] = { 569 [PROTO_XTR_VLAN] = { ICE_RXDID_COMMS_AUX_VLAN, 570 ICE_RX_OPC_EXTRACT, 571 ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O}, 572 [PROTO_XTR_IPV4] = { ICE_RXDID_COMMS_AUX_IPV4, 573 ICE_RX_OPC_EXTRACT, 574 ICE_PROT_IPV4_OF_OR_S, 575 ICE_PROT_IPV4_OF_OR_S }, 576 [PROTO_XTR_IPV6] = { ICE_RXDID_COMMS_AUX_IPV6, 577 ICE_RX_OPC_EXTRACT, 578 ICE_PROT_IPV6_OF_OR_S, 579 ICE_PROT_IPV6_OF_OR_S }, 580 [PROTO_XTR_IPV6_FLOW] = { ICE_RXDID_COMMS_AUX_IPV6_FLOW, 581 ICE_RX_OPC_EXTRACT, 582 ICE_PROT_IPV6_OF_OR_S, 583 ICE_PROT_IPV6_OF_OR_S }, 584 [PROTO_XTR_TCP] = { ICE_RXDID_COMMS_AUX_TCP, 585 ICE_RX_OPC_EXTRACT, 586 ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL }, 587 [PROTO_XTR_IP_OFFSET] = { ICE_RXDID_COMMS_AUX_IP_OFFSET, 588 ICE_RX_OPC_PROTID, 589 ICE_PROT_IPV4_OF_OR_S, 590 ICE_PROT_IPV6_OF_OR_S }, 591 }; 592 uint32_t i; 593 594 for (i = 0; i < RTE_DIM(xtr_sets); i++) { 595 uint32_t rxdid = xtr_sets[i].rxdid; 596 uint32_t v; 597 598 if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) { 599 v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid)); 600 601 if (FLX_REG(v, PROT_MDID, 4) == xtr_sets[i].protid_0 && 602 FLX_REG(v, RXDID_OPCODE, 4) == xtr_sets[i].opcode) 603 ice_proto_xtr_hw_support[i] = true; 604 } 605 606 if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) { 607 v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid)); 608 609 if (FLX_REG(v, PROT_MDID, 5) == xtr_sets[i].protid_1 && 610 FLX_REG(v, RXDID_OPCODE, 5) == xtr_sets[i].opcode) 611 ice_proto_xtr_hw_support[i] = true; 612 } 613 } 614 } 615 616 static int 617 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base, 618 uint32_t num) 619 { 620 struct pool_entry *entry; 621 622 if (!pool || !num) 623 return -EINVAL; 624 625 entry = rte_zmalloc(NULL, sizeof(*entry), 0); 626 if (!entry) { 627 PMD_INIT_LOG(ERR, 628 "Failed to allocate memory for resource pool"); 629 return -ENOMEM; 630 } 631 632 /* 
queue heap initialize */ 633 pool->num_free = num; 634 pool->num_alloc = 0; 635 pool->base = base; 636 LIST_INIT(&pool->alloc_list); 637 LIST_INIT(&pool->free_list); 638 639 /* Initialize element */ 640 entry->base = 0; 641 entry->len = num; 642 643 LIST_INSERT_HEAD(&pool->free_list, entry, next); 644 return 0; 645 } 646 647 static int 648 ice_res_pool_alloc(struct ice_res_pool_info *pool, 649 uint16_t num) 650 { 651 struct pool_entry *entry, *valid_entry; 652 653 if (!pool || !num) { 654 PMD_INIT_LOG(ERR, "Invalid parameter"); 655 return -EINVAL; 656 } 657 658 if (pool->num_free < num) { 659 PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u", 660 num, pool->num_free); 661 return -ENOMEM; 662 } 663 664 valid_entry = NULL; 665 /* Lookup in free list and find most fit one */ 666 LIST_FOREACH(entry, &pool->free_list, next) { 667 if (entry->len >= num) { 668 /* Find best one */ 669 if (entry->len == num) { 670 valid_entry = entry; 671 break; 672 } 673 if (!valid_entry || 674 valid_entry->len > entry->len) 675 valid_entry = entry; 676 } 677 } 678 679 /* Not find one to satisfy the request, return */ 680 if (!valid_entry) { 681 PMD_INIT_LOG(ERR, "No valid entry found"); 682 return -ENOMEM; 683 } 684 /** 685 * The entry have equal queue number as requested, 686 * remove it from alloc_list. 687 */ 688 if (valid_entry->len == num) { 689 LIST_REMOVE(valid_entry, next); 690 } else { 691 /** 692 * The entry have more numbers than requested, 693 * create a new entry for alloc_list and minus its 694 * queue base and number in free_list. 695 */ 696 entry = rte_zmalloc(NULL, sizeof(*entry), 0); 697 if (!entry) { 698 PMD_INIT_LOG(ERR, 699 "Failed to allocate memory for " 700 "resource pool"); 701 return -ENOMEM; 702 } 703 entry->base = valid_entry->base; 704 entry->len = num; 705 valid_entry->base += num; 706 valid_entry->len -= num; 707 valid_entry = entry; 708 } 709 710 /* Insert it into alloc list, not sorted */ 711 LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next); 712 713 pool->num_free -= valid_entry->len; 714 pool->num_alloc += valid_entry->len; 715 716 return valid_entry->base + pool->base; 717 } 718 719 static void 720 ice_res_pool_destroy(struct ice_res_pool_info *pool) 721 { 722 struct pool_entry *entry, *next_entry; 723 724 if (!pool) 725 return; 726 727 for (entry = LIST_FIRST(&pool->alloc_list); 728 entry && (next_entry = LIST_NEXT(entry, next), 1); 729 entry = next_entry) { 730 LIST_REMOVE(entry, next); 731 rte_free(entry); 732 } 733 734 for (entry = LIST_FIRST(&pool->free_list); 735 entry && (next_entry = LIST_NEXT(entry, next), 1); 736 entry = next_entry) { 737 LIST_REMOVE(entry, next); 738 rte_free(entry); 739 } 740 741 pool->num_free = 0; 742 pool->num_alloc = 0; 743 pool->base = 0; 744 LIST_INIT(&pool->alloc_list); 745 LIST_INIT(&pool->free_list); 746 } 747 748 static void 749 ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info) 750 { 751 /* Set VSI LUT selection */ 752 info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI & 753 ICE_AQ_VSI_Q_OPT_RSS_LUT_M; 754 /* Set Hash scheme */ 755 info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ & 756 ICE_AQ_VSI_Q_OPT_RSS_HASH_M; 757 /* enable TC */ 758 info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M; 759 } 760 761 static enum ice_status 762 ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi, 763 struct ice_aqc_vsi_props *info, 764 uint8_t enabled_tcmap) 765 { 766 uint16_t bsf, qp_idx; 767 768 /* default tc 0 now. Multi-TC supporting need to be done later. 
769 * Configure TC and queue mapping parameters, for enabled TC, 770 * allocate qpnum_per_tc queues to this traffic. 771 */ 772 if (enabled_tcmap != 0x01) { 773 PMD_INIT_LOG(ERR, "only TC0 is supported"); 774 return -ENOTSUP; 775 } 776 777 vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC); 778 bsf = rte_bsf32(vsi->nb_qps); 779 /* Adjust the queue number to actual queues that can be applied */ 780 vsi->nb_qps = 0x1 << bsf; 781 782 qp_idx = 0; 783 /* Set tc and queue mapping with VSI */ 784 info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx << 785 ICE_AQ_VSI_TC_Q_OFFSET_S) | 786 (bsf << ICE_AQ_VSI_TC_Q_NUM_S)); 787 788 /* Associate queue number with VSI */ 789 info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG); 790 info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue); 791 info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps); 792 info->valid_sections |= 793 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); 794 /* Set the info.ingress_table and info.egress_table 795 * for UP translate table. Now just set it to 1:1 map by default 796 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688 797 */ 798 #define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688 799 info->ingress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT); 800 info->egress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT); 801 info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT); 802 return 0; 803 } 804 805 static int 806 ice_init_mac_address(struct rte_eth_dev *dev) 807 { 808 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 809 810 if (!rte_is_unicast_ether_addr 811 ((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) { 812 PMD_INIT_LOG(ERR, "Invalid MAC address"); 813 return -EINVAL; 814 } 815 816 rte_ether_addr_copy( 817 (struct rte_ether_addr *)hw->port_info[0].mac.lan_addr, 818 (struct rte_ether_addr *)hw->port_info[0].mac.perm_addr); 819 820 dev->data->mac_addrs = 821 rte_zmalloc(NULL, sizeof(struct rte_ether_addr) * ICE_NUM_MACADDR_MAX, 0); 822 if (!dev->data->mac_addrs) { 823 PMD_INIT_LOG(ERR, 824 "Failed to allocate memory to store mac address"); 825 return -ENOMEM; 826 } 827 /* store it to dev data */ 828 rte_ether_addr_copy( 829 (struct rte_ether_addr *)hw->port_info[0].mac.perm_addr, 830 &dev->data->mac_addrs[0]); 831 return 0; 832 } 833 834 /* Find out specific MAC filter */ 835 static struct ice_mac_filter * 836 ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr) 837 { 838 struct ice_mac_filter *f; 839 840 TAILQ_FOREACH(f, &vsi->mac_list, next) { 841 if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr)) 842 return f; 843 } 844 845 return NULL; 846 } 847 848 static int 849 ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr) 850 { 851 struct ice_fltr_list_entry *m_list_itr = NULL; 852 struct ice_mac_filter *f; 853 struct LIST_HEAD_TYPE list_head; 854 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 855 int ret = 0; 856 857 /* If it's added and configured, return */ 858 f = ice_find_mac_filter(vsi, mac_addr); 859 if (f) { 860 PMD_DRV_LOG(INFO, "This MAC filter already exists."); 861 return 0; 862 } 863 864 INIT_LIST_HEAD(&list_head); 865 866 m_list_itr = (struct ice_fltr_list_entry *) 867 ice_malloc(hw, sizeof(*m_list_itr)); 868 if (!m_list_itr) { 869 ret = -ENOMEM; 870 goto DONE; 871 } 872 ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr, 873 mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA); 874 m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI; 875 m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI; 876 m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC; 877 
m_list_itr->fltr_info.flag = ICE_FLTR_TX; 878 m_list_itr->fltr_info.vsi_handle = vsi->idx; 879 880 LIST_ADD(&m_list_itr->list_entry, &list_head); 881 882 /* Add the mac */ 883 ret = ice_add_mac(hw, &list_head); 884 if (ret != ICE_SUCCESS) { 885 PMD_DRV_LOG(ERR, "Failed to add MAC filter"); 886 ret = -EINVAL; 887 goto DONE; 888 } 889 /* Add the mac addr into mac list */ 890 f = rte_zmalloc(NULL, sizeof(*f), 0); 891 if (!f) { 892 PMD_DRV_LOG(ERR, "failed to allocate memory"); 893 ret = -ENOMEM; 894 goto DONE; 895 } 896 rte_ether_addr_copy(mac_addr, &f->mac_info.mac_addr); 897 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next); 898 vsi->mac_num++; 899 900 ret = 0; 901 902 DONE: 903 rte_free(m_list_itr); 904 return ret; 905 } 906 907 static int 908 ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr) 909 { 910 struct ice_fltr_list_entry *m_list_itr = NULL; 911 struct ice_mac_filter *f; 912 struct LIST_HEAD_TYPE list_head; 913 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 914 int ret = 0; 915 916 /* Can't find it, return an error */ 917 f = ice_find_mac_filter(vsi, mac_addr); 918 if (!f) 919 return -EINVAL; 920 921 INIT_LIST_HEAD(&list_head); 922 923 m_list_itr = (struct ice_fltr_list_entry *) 924 ice_malloc(hw, sizeof(*m_list_itr)); 925 if (!m_list_itr) { 926 ret = -ENOMEM; 927 goto DONE; 928 } 929 ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr, 930 mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA); 931 m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI; 932 m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI; 933 m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC; 934 m_list_itr->fltr_info.flag = ICE_FLTR_TX; 935 m_list_itr->fltr_info.vsi_handle = vsi->idx; 936 937 LIST_ADD(&m_list_itr->list_entry, &list_head); 938 939 /* remove the mac filter */ 940 ret = ice_remove_mac(hw, &list_head); 941 if (ret != ICE_SUCCESS) { 942 PMD_DRV_LOG(ERR, "Failed to remove MAC filter"); 943 ret = -EINVAL; 944 goto DONE; 945 } 946 947 /* Remove the mac addr from mac list */ 948 TAILQ_REMOVE(&vsi->mac_list, f, next); 949 rte_free(f); 950 vsi->mac_num--; 951 952 ret = 0; 953 DONE: 954 rte_free(m_list_itr); 955 return ret; 956 } 957 958 /* Find out specific VLAN filter */ 959 static struct ice_vlan_filter * 960 ice_find_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan) 961 { 962 struct ice_vlan_filter *f; 963 964 TAILQ_FOREACH(f, &vsi->vlan_list, next) { 965 if (vlan->tpid == f->vlan_info.vlan.tpid && 966 vlan->vid == f->vlan_info.vlan.vid) 967 return f; 968 } 969 970 return NULL; 971 } 972 973 static int 974 ice_add_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan) 975 { 976 struct ice_fltr_list_entry *v_list_itr = NULL; 977 struct ice_vlan_filter *f; 978 struct LIST_HEAD_TYPE list_head; 979 struct ice_hw *hw; 980 int ret = 0; 981 982 if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID) 983 return -EINVAL; 984 985 hw = ICE_VSI_TO_HW(vsi); 986 987 /* If it's added and configured, return. 
*/ 988 f = ice_find_vlan_filter(vsi, vlan); 989 if (f) { 990 PMD_DRV_LOG(INFO, "This VLAN filter already exists."); 991 return 0; 992 } 993 994 if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) 995 return 0; 996 997 INIT_LIST_HEAD(&list_head); 998 999 v_list_itr = (struct ice_fltr_list_entry *) 1000 ice_malloc(hw, sizeof(*v_list_itr)); 1001 if (!v_list_itr) { 1002 ret = -ENOMEM; 1003 goto DONE; 1004 } 1005 v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid; 1006 v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid; 1007 v_list_itr->fltr_info.l_data.vlan.tpid_valid = true; 1008 v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI; 1009 v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI; 1010 v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; 1011 v_list_itr->fltr_info.flag = ICE_FLTR_TX; 1012 v_list_itr->fltr_info.vsi_handle = vsi->idx; 1013 1014 LIST_ADD(&v_list_itr->list_entry, &list_head); 1015 1016 /* Add the vlan */ 1017 ret = ice_add_vlan(hw, &list_head); 1018 if (ret != ICE_SUCCESS) { 1019 PMD_DRV_LOG(ERR, "Failed to add VLAN filter"); 1020 ret = -EINVAL; 1021 goto DONE; 1022 } 1023 1024 /* Add vlan into vlan list */ 1025 f = rte_zmalloc(NULL, sizeof(*f), 0); 1026 if (!f) { 1027 PMD_DRV_LOG(ERR, "failed to allocate memory"); 1028 ret = -ENOMEM; 1029 goto DONE; 1030 } 1031 f->vlan_info.vlan.tpid = vlan->tpid; 1032 f->vlan_info.vlan.vid = vlan->vid; 1033 TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next); 1034 vsi->vlan_num++; 1035 1036 ret = 0; 1037 1038 DONE: 1039 rte_free(v_list_itr); 1040 return ret; 1041 } 1042 1043 static int 1044 ice_remove_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan) 1045 { 1046 struct ice_fltr_list_entry *v_list_itr = NULL; 1047 struct ice_vlan_filter *f; 1048 struct LIST_HEAD_TYPE list_head; 1049 struct ice_hw *hw; 1050 int ret = 0; 1051 1052 if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID) 1053 return -EINVAL; 1054 1055 hw = ICE_VSI_TO_HW(vsi); 1056 1057 /* Can't find it, return an error */ 1058 f = ice_find_vlan_filter(vsi, vlan); 1059 if (!f) 1060 return -EINVAL; 1061 1062 INIT_LIST_HEAD(&list_head); 1063 1064 v_list_itr = (struct ice_fltr_list_entry *) 1065 ice_malloc(hw, sizeof(*v_list_itr)); 1066 if (!v_list_itr) { 1067 ret = -ENOMEM; 1068 goto DONE; 1069 } 1070 1071 v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid; 1072 v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid; 1073 v_list_itr->fltr_info.l_data.vlan.tpid_valid = true; 1074 v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI; 1075 v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI; 1076 v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; 1077 v_list_itr->fltr_info.flag = ICE_FLTR_TX; 1078 v_list_itr->fltr_info.vsi_handle = vsi->idx; 1079 1080 LIST_ADD(&v_list_itr->list_entry, &list_head); 1081 1082 /* remove the vlan filter */ 1083 ret = ice_remove_vlan(hw, &list_head); 1084 if (ret != ICE_SUCCESS) { 1085 PMD_DRV_LOG(ERR, "Failed to remove VLAN filter"); 1086 ret = -EINVAL; 1087 goto DONE; 1088 } 1089 1090 /* Remove the vlan id from vlan list */ 1091 TAILQ_REMOVE(&vsi->vlan_list, f, next); 1092 rte_free(f); 1093 vsi->vlan_num--; 1094 1095 ret = 0; 1096 DONE: 1097 rte_free(v_list_itr); 1098 return ret; 1099 } 1100 1101 static int 1102 ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi) 1103 { 1104 struct ice_mac_filter *m_f; 1105 struct ice_vlan_filter *v_f; 1106 void *temp; 1107 int ret = 0; 1108 1109 if (!vsi || !vsi->mac_num) 1110 return -EINVAL; 1111 1112 TAILQ_FOREACH_SAFE(m_f, &vsi->mac_list, next, temp) { 1113 ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr); 1114 if (ret != 
ICE_SUCCESS) { 1115 ret = -EINVAL; 1116 goto DONE; 1117 } 1118 } 1119 1120 if (vsi->vlan_num == 0) 1121 return 0; 1122 1123 TAILQ_FOREACH_SAFE(v_f, &vsi->vlan_list, next, temp) { 1124 ret = ice_remove_vlan_filter(vsi, &v_f->vlan_info.vlan); 1125 if (ret != ICE_SUCCESS) { 1126 ret = -EINVAL; 1127 goto DONE; 1128 } 1129 } 1130 1131 DONE: 1132 return ret; 1133 } 1134 1135 /* Enable IRQ0 */ 1136 static void 1137 ice_pf_enable_irq0(struct ice_hw *hw) 1138 { 1139 /* reset the registers */ 1140 ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0); 1141 ICE_READ_REG(hw, PFINT_OICR); 1142 1143 #ifdef ICE_LSE_SPT 1144 ICE_WRITE_REG(hw, PFINT_OICR_ENA, 1145 (uint32_t)(PFINT_OICR_ENA_INT_ENA_M & 1146 (~PFINT_OICR_LINK_STAT_CHANGE_M))); 1147 1148 ICE_WRITE_REG(hw, PFINT_OICR_CTL, 1149 (0 & PFINT_OICR_CTL_MSIX_INDX_M) | 1150 ((0 << PFINT_OICR_CTL_ITR_INDX_S) & 1151 PFINT_OICR_CTL_ITR_INDX_M) | 1152 PFINT_OICR_CTL_CAUSE_ENA_M); 1153 1154 ICE_WRITE_REG(hw, PFINT_FW_CTL, 1155 (0 & PFINT_FW_CTL_MSIX_INDX_M) | 1156 ((0 << PFINT_FW_CTL_ITR_INDX_S) & 1157 PFINT_FW_CTL_ITR_INDX_M) | 1158 PFINT_FW_CTL_CAUSE_ENA_M); 1159 #else 1160 ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M); 1161 #endif 1162 1163 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), 1164 GLINT_DYN_CTL_INTENA_M | 1165 GLINT_DYN_CTL_CLEARPBA_M | 1166 GLINT_DYN_CTL_ITR_INDX_M); 1167 1168 ice_flush(hw); 1169 } 1170 1171 /* Disable IRQ0 */ 1172 static void 1173 ice_pf_disable_irq0(struct ice_hw *hw) 1174 { 1175 /* Disable all interrupt types */ 1176 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M); 1177 ice_flush(hw); 1178 } 1179 1180 #ifdef ICE_LSE_SPT 1181 static void 1182 ice_handle_aq_msg(struct rte_eth_dev *dev) 1183 { 1184 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1185 struct ice_ctl_q_info *cq = &hw->adminq; 1186 struct ice_rq_event_info event; 1187 uint16_t pending, opcode; 1188 int ret; 1189 1190 event.buf_len = ICE_AQ_MAX_BUF_LEN; 1191 event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0); 1192 if (!event.msg_buf) { 1193 PMD_DRV_LOG(ERR, "Failed to allocate mem"); 1194 return; 1195 } 1196 1197 pending = 1; 1198 while (pending) { 1199 ret = ice_clean_rq_elem(hw, cq, &event, &pending); 1200 1201 if (ret != ICE_SUCCESS) { 1202 PMD_DRV_LOG(INFO, 1203 "Failed to read msg from AdminQ, " 1204 "adminq_err: %u", 1205 hw->adminq.sq_last_status); 1206 break; 1207 } 1208 opcode = rte_le_to_cpu_16(event.desc.opcode); 1209 1210 switch (opcode) { 1211 case ice_aqc_opc_get_link_status: 1212 ret = ice_link_update(dev, 0); 1213 if (!ret) 1214 rte_eth_dev_callback_process 1215 (dev, RTE_ETH_EVENT_INTR_LSC, NULL); 1216 break; 1217 default: 1218 PMD_DRV_LOG(DEBUG, "Request %u is not supported yet", 1219 opcode); 1220 break; 1221 } 1222 } 1223 rte_free(event.msg_buf); 1224 } 1225 #endif 1226 1227 /** 1228 * Interrupt handler triggered by NIC for handling 1229 * specific interrupt. 1230 * 1231 * @param handle 1232 * Pointer to interrupt handle. 1233 * @param param 1234 * The address of parameter (struct rte_eth_dev *) regsitered before. 
1235 * 1236 * @return 1237 * void 1238 */ 1239 static void 1240 ice_interrupt_handler(void *param) 1241 { 1242 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 1243 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1244 uint32_t oicr; 1245 uint32_t reg; 1246 uint8_t pf_num; 1247 uint8_t event; 1248 uint16_t queue; 1249 int ret; 1250 #ifdef ICE_LSE_SPT 1251 uint32_t int_fw_ctl; 1252 #endif 1253 1254 /* Disable interrupt */ 1255 ice_pf_disable_irq0(hw); 1256 1257 /* read out interrupt causes */ 1258 oicr = ICE_READ_REG(hw, PFINT_OICR); 1259 #ifdef ICE_LSE_SPT 1260 int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL); 1261 #endif 1262 1263 /* No interrupt event indicated */ 1264 if (!(oicr & PFINT_OICR_INTEVENT_M)) { 1265 PMD_DRV_LOG(INFO, "No interrupt event"); 1266 goto done; 1267 } 1268 1269 #ifdef ICE_LSE_SPT 1270 if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) { 1271 PMD_DRV_LOG(INFO, "FW_CTL: link state change event"); 1272 ice_handle_aq_msg(dev); 1273 } 1274 #else 1275 if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) { 1276 PMD_DRV_LOG(INFO, "OICR: link state change event"); 1277 ret = ice_link_update(dev, 0); 1278 if (!ret) 1279 rte_eth_dev_callback_process 1280 (dev, RTE_ETH_EVENT_INTR_LSC, NULL); 1281 } 1282 #endif 1283 1284 if (oicr & PFINT_OICR_MAL_DETECT_M) { 1285 PMD_DRV_LOG(WARNING, "OICR: MDD event"); 1286 reg = ICE_READ_REG(hw, GL_MDET_TX_PQM); 1287 if (reg & GL_MDET_TX_PQM_VALID_M) { 1288 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> 1289 GL_MDET_TX_PQM_PF_NUM_S; 1290 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> 1291 GL_MDET_TX_PQM_MAL_TYPE_S; 1292 queue = (reg & GL_MDET_TX_PQM_QNUM_M) >> 1293 GL_MDET_TX_PQM_QNUM_S; 1294 1295 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event " 1296 "%d by PQM on TX queue %d PF# %d", 1297 event, queue, pf_num); 1298 } 1299 1300 reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN); 1301 if (reg & GL_MDET_TX_TCLAN_VALID_M) { 1302 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> 1303 GL_MDET_TX_TCLAN_PF_NUM_S; 1304 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> 1305 GL_MDET_TX_TCLAN_MAL_TYPE_S; 1306 queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >> 1307 GL_MDET_TX_TCLAN_QNUM_S; 1308 1309 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event " 1310 "%d by TCLAN on TX queue %d PF# %d", 1311 event, queue, pf_num); 1312 } 1313 } 1314 done: 1315 /* Enable interrupt */ 1316 ice_pf_enable_irq0(hw); 1317 rte_intr_ack(dev->intr_handle); 1318 } 1319 1320 static void 1321 ice_init_proto_xtr(struct rte_eth_dev *dev) 1322 { 1323 struct ice_adapter *ad = 1324 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 1325 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1326 struct ice_hw *hw = ICE_PF_TO_HW(pf); 1327 const struct proto_xtr_ol_flag *ol_flag; 1328 bool proto_xtr_enable = false; 1329 int offset; 1330 uint16_t i; 1331 1332 pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0); 1333 if (unlikely(pf->proto_xtr == NULL)) { 1334 PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table"); 1335 return; 1336 } 1337 1338 for (i = 0; i < pf->lan_nb_qps; i++) { 1339 pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ? 
1340 ad->devargs.proto_xtr[i] : 1341 ad->devargs.proto_xtr_dflt; 1342 1343 if (pf->proto_xtr[i] != PROTO_XTR_NONE) { 1344 uint8_t type = pf->proto_xtr[i]; 1345 1346 ice_proto_xtr_ol_flag_params[type].required = true; 1347 proto_xtr_enable = true; 1348 } 1349 } 1350 1351 if (likely(!proto_xtr_enable)) 1352 return; 1353 1354 ice_check_proto_xtr_support(hw); 1355 1356 offset = rte_mbuf_dynfield_register(&ice_proto_xtr_metadata_param); 1357 if (unlikely(offset == -1)) { 1358 PMD_DRV_LOG(ERR, 1359 "Protocol extraction metadata is disabled in mbuf with error %d", 1360 -rte_errno); 1361 return; 1362 } 1363 1364 PMD_DRV_LOG(DEBUG, 1365 "Protocol extraction metadata offset in mbuf is : %d", 1366 offset); 1367 rte_net_ice_dynfield_proto_xtr_metadata_offs = offset; 1368 1369 for (i = 0; i < RTE_DIM(ice_proto_xtr_ol_flag_params); i++) { 1370 ol_flag = &ice_proto_xtr_ol_flag_params[i]; 1371 1372 if (!ol_flag->required) 1373 continue; 1374 1375 if (!ice_proto_xtr_hw_support[i]) { 1376 PMD_DRV_LOG(ERR, 1377 "Protocol extraction type %u is not supported in hardware", 1378 i); 1379 rte_net_ice_dynfield_proto_xtr_metadata_offs = -1; 1380 break; 1381 } 1382 1383 offset = rte_mbuf_dynflag_register(&ol_flag->param); 1384 if (unlikely(offset == -1)) { 1385 PMD_DRV_LOG(ERR, 1386 "Protocol extraction offload '%s' failed to register with error %d", 1387 ol_flag->param.name, -rte_errno); 1388 1389 rte_net_ice_dynfield_proto_xtr_metadata_offs = -1; 1390 break; 1391 } 1392 1393 PMD_DRV_LOG(DEBUG, 1394 "Protocol extraction offload '%s' offset in mbuf is : %d", 1395 ol_flag->param.name, offset); 1396 *ol_flag->ol_flag = 1ULL << offset; 1397 } 1398 } 1399 1400 /* Initialize SW parameters of PF */ 1401 static int 1402 ice_pf_sw_init(struct rte_eth_dev *dev) 1403 { 1404 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1405 struct ice_hw *hw = ICE_PF_TO_HW(pf); 1406 1407 pf->lan_nb_qp_max = 1408 (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq, 1409 hw->func_caps.common_cap.num_rxq); 1410 1411 pf->lan_nb_qps = pf->lan_nb_qp_max; 1412 1413 ice_init_proto_xtr(dev); 1414 1415 if (hw->func_caps.fd_fltr_guar > 0 || 1416 hw->func_caps.fd_fltr_best_effort > 0) { 1417 pf->flags |= ICE_FLAG_FDIR; 1418 pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR; 1419 pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps; 1420 } else { 1421 pf->fdir_nb_qps = 0; 1422 } 1423 pf->fdir_qp_offset = 0; 1424 1425 return 0; 1426 } 1427 1428 struct ice_vsi * 1429 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type) 1430 { 1431 struct ice_hw *hw = ICE_PF_TO_HW(pf); 1432 struct ice_vsi *vsi = NULL; 1433 struct ice_vsi_ctx vsi_ctx; 1434 int ret; 1435 struct rte_ether_addr broadcast = { 1436 .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} }; 1437 struct rte_ether_addr mac_addr; 1438 uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 1439 uint8_t tc_bitmap = 0x1; 1440 uint16_t cfg; 1441 1442 /* hw->num_lports = 1 in NIC mode */ 1443 vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0); 1444 if (!vsi) 1445 return NULL; 1446 1447 vsi->idx = pf->next_vsi_idx; 1448 pf->next_vsi_idx++; 1449 vsi->type = type; 1450 vsi->adapter = ICE_PF_TO_ADAPTER(pf); 1451 vsi->max_macaddrs = ICE_NUM_MACADDR_MAX; 1452 vsi->vlan_anti_spoof_on = 0; 1453 vsi->vlan_filter_on = 1; 1454 TAILQ_INIT(&vsi->mac_list); 1455 TAILQ_INIT(&vsi->vlan_list); 1456 1457 /* Be sync with ETH_RSS_RETA_SIZE_x maximum value definition */ 1458 pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size > 1459 ETH_RSS_RETA_SIZE_512 ? 
ETH_RSS_RETA_SIZE_512 : 1460 hw->func_caps.common_cap.rss_table_size; 1461 pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE; 1462 1463 memset(&vsi_ctx, 0, sizeof(vsi_ctx)); 1464 switch (type) { 1465 case ICE_VSI_PF: 1466 vsi->nb_qps = pf->lan_nb_qps; 1467 vsi->base_queue = 1; 1468 ice_vsi_config_default_rss(&vsi_ctx.info); 1469 vsi_ctx.alloc_from_pool = true; 1470 vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF; 1471 /* switch_id is queried by get_switch_config aq, which is done 1472 * by ice_init_hw 1473 */ 1474 vsi_ctx.info.sw_id = hw->port_info->sw_id; 1475 vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; 1476 /* Allow all untagged or tagged packets */ 1477 vsi_ctx.info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL; 1478 vsi_ctx.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING; 1479 vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF | 1480 ICE_AQ_VSI_Q_OPT_RSS_TPLZ; 1481 if (ice_is_dvm_ena(hw)) { 1482 vsi_ctx.info.outer_vlan_flags = 1483 (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL << 1484 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) & 1485 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M; 1486 vsi_ctx.info.outer_vlan_flags |= 1487 (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 << 1488 ICE_AQ_VSI_OUTER_TAG_TYPE_S) & 1489 ICE_AQ_VSI_OUTER_TAG_TYPE_M; 1490 } 1491 1492 /* FDIR */ 1493 cfg = ICE_AQ_VSI_PROP_SECURITY_VALID | 1494 ICE_AQ_VSI_PROP_FLOW_DIR_VALID; 1495 vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg); 1496 cfg = ICE_AQ_VSI_FD_ENABLE; 1497 vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg); 1498 vsi_ctx.info.max_fd_fltr_dedicated = 1499 rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar); 1500 vsi_ctx.info.max_fd_fltr_shared = 1501 rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort); 1502 1503 /* Enable VLAN/UP trip */ 1504 ret = ice_vsi_config_tc_queue_mapping(vsi, 1505 &vsi_ctx.info, 1506 ICE_DEFAULT_TCMAP); 1507 if (ret) { 1508 PMD_INIT_LOG(ERR, 1509 "tc queue mapping with vsi failed, " 1510 "err = %d", 1511 ret); 1512 goto fail_mem; 1513 } 1514 1515 break; 1516 case ICE_VSI_CTRL: 1517 vsi->nb_qps = pf->fdir_nb_qps; 1518 vsi->base_queue = ICE_FDIR_QUEUE_ID; 1519 vsi_ctx.alloc_from_pool = true; 1520 vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF; 1521 1522 cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID; 1523 vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg); 1524 cfg = ICE_AQ_VSI_FD_PROG_ENABLE; 1525 vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg); 1526 vsi_ctx.info.sw_id = hw->port_info->sw_id; 1527 vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; 1528 ret = ice_vsi_config_tc_queue_mapping(vsi, 1529 &vsi_ctx.info, 1530 ICE_DEFAULT_TCMAP); 1531 if (ret) { 1532 PMD_INIT_LOG(ERR, 1533 "tc queue mapping with vsi failed, " 1534 "err = %d", 1535 ret); 1536 goto fail_mem; 1537 } 1538 break; 1539 default: 1540 /* for other types of VSI */ 1541 PMD_INIT_LOG(ERR, "other types of VSI not supported"); 1542 goto fail_mem; 1543 } 1544 1545 /* VF has MSIX interrupt in VF range, don't allocate here */ 1546 if (type == ICE_VSI_PF) { 1547 ret = ice_res_pool_alloc(&pf->msix_pool, 1548 RTE_MIN(vsi->nb_qps, 1549 RTE_MAX_RXTX_INTR_VEC_ID)); 1550 if (ret < 0) { 1551 PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d", 1552 vsi->vsi_id, ret); 1553 } 1554 vsi->msix_intr = ret; 1555 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID); 1556 } else if (type == ICE_VSI_CTRL) { 1557 ret = ice_res_pool_alloc(&pf->msix_pool, 1); 1558 if (ret < 0) { 1559 PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", 1560 vsi->vsi_id, ret); 1561 } 1562 vsi->msix_intr = ret; 1563 vsi->nb_msix = 1; 1564 } else { 1565 vsi->msix_intr = 0; 1566 vsi->nb_msix = 0; 1567 } 1568 ret = ice_add_vsi(hw, 
vsi->idx, &vsi_ctx, NULL); 1569 if (ret != ICE_SUCCESS) { 1570 PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret); 1571 goto fail_mem; 1572 } 1573 /* store vsi information is SW structure */ 1574 vsi->vsi_id = vsi_ctx.vsi_num; 1575 vsi->info = vsi_ctx.info; 1576 pf->vsis_allocated = vsi_ctx.vsis_allocd; 1577 pf->vsis_unallocated = vsi_ctx.vsis_unallocated; 1578 1579 if (type == ICE_VSI_PF) { 1580 /* MAC configuration */ 1581 rte_ether_addr_copy((struct rte_ether_addr *) 1582 hw->port_info->mac.perm_addr, 1583 &pf->dev_addr); 1584 1585 rte_ether_addr_copy(&pf->dev_addr, &mac_addr); 1586 ret = ice_add_mac_filter(vsi, &mac_addr); 1587 if (ret != ICE_SUCCESS) 1588 PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter"); 1589 1590 rte_ether_addr_copy(&broadcast, &mac_addr); 1591 ret = ice_add_mac_filter(vsi, &mac_addr); 1592 if (ret != ICE_SUCCESS) 1593 PMD_INIT_LOG(ERR, "Failed to add MAC filter"); 1594 } 1595 1596 /* At the beginning, only TC0. */ 1597 /* What we need here is the maximam number of the TX queues. 1598 * Currently vsi->nb_qps means it. 1599 * Correct it if any change. 1600 */ 1601 max_txqs[0] = vsi->nb_qps; 1602 ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx, 1603 tc_bitmap, max_txqs); 1604 if (ret != ICE_SUCCESS) 1605 PMD_INIT_LOG(ERR, "Failed to config vsi sched"); 1606 1607 return vsi; 1608 fail_mem: 1609 rte_free(vsi); 1610 pf->next_vsi_idx--; 1611 return NULL; 1612 } 1613 1614 static int 1615 ice_send_driver_ver(struct ice_hw *hw) 1616 { 1617 struct ice_driver_ver dv; 1618 1619 /* we don't have driver version use 0 for dummy */ 1620 dv.major_ver = 0; 1621 dv.minor_ver = 0; 1622 dv.build_ver = 0; 1623 dv.subbuild_ver = 0; 1624 strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string)); 1625 1626 return ice_aq_send_driver_ver(hw, &dv, NULL); 1627 } 1628 1629 static int 1630 ice_pf_setup(struct ice_pf *pf) 1631 { 1632 struct ice_hw *hw = ICE_PF_TO_HW(pf); 1633 struct ice_vsi *vsi; 1634 uint16_t unused; 1635 1636 /* Clear all stats counters */ 1637 pf->offset_loaded = false; 1638 memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats)); 1639 memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats)); 1640 memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats)); 1641 memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats)); 1642 1643 /* force guaranteed filter pool for PF */ 1644 ice_alloc_fd_guar_item(hw, &unused, 1645 hw->func_caps.fd_fltr_guar); 1646 /* force shared filter pool for PF */ 1647 ice_alloc_fd_shrd_item(hw, &unused, 1648 hw->func_caps.fd_fltr_best_effort); 1649 1650 vsi = ice_setup_vsi(pf, ICE_VSI_PF); 1651 if (!vsi) { 1652 PMD_INIT_LOG(ERR, "Failed to add vsi for PF"); 1653 return -EINVAL; 1654 } 1655 1656 pf->main_vsi = vsi; 1657 1658 return 0; 1659 } 1660 1661 static enum ice_pkg_type 1662 ice_load_pkg_type(struct ice_hw *hw) 1663 { 1664 enum ice_pkg_type package_type; 1665 1666 /* store the activated package type (OS default or Comms) */ 1667 if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME, 1668 ICE_PKG_NAME_SIZE)) 1669 package_type = ICE_PKG_TYPE_OS_DEFAULT; 1670 else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME, 1671 ICE_PKG_NAME_SIZE)) 1672 package_type = ICE_PKG_TYPE_COMMS; 1673 else 1674 package_type = ICE_PKG_TYPE_UNKNOWN; 1675 1676 PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s (%s VLAN mode)", 1677 hw->active_pkg_ver.major, hw->active_pkg_ver.minor, 1678 hw->active_pkg_ver.update, hw->active_pkg_ver.draft, 1679 hw->active_pkg_name, 1680 ice_is_dvm_ena(hw) ? 
"double" : "single"); 1681 1682 return package_type; 1683 } 1684 1685 int ice_load_pkg(struct ice_adapter *adapter, bool use_dsn, uint64_t dsn) 1686 { 1687 struct ice_hw *hw = &adapter->hw; 1688 char pkg_file[ICE_MAX_PKG_FILENAME_SIZE]; 1689 char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE]; 1690 void *buf; 1691 size_t bufsz; 1692 int err; 1693 1694 if (!use_dsn) 1695 goto no_dsn; 1696 1697 memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE); 1698 snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE, 1699 "ice-%016" PRIx64 ".pkg", dsn); 1700 strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES, 1701 ICE_MAX_PKG_FILENAME_SIZE); 1702 strcat(pkg_file, opt_ddp_filename); 1703 if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0) 1704 goto load_fw; 1705 1706 strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT, 1707 ICE_MAX_PKG_FILENAME_SIZE); 1708 strcat(pkg_file, opt_ddp_filename); 1709 if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0) 1710 goto load_fw; 1711 1712 no_dsn: 1713 strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE); 1714 if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0) 1715 goto load_fw; 1716 1717 strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE); 1718 if (rte_firmware_read(pkg_file, &buf, &bufsz) < 0) { 1719 PMD_INIT_LOG(ERR, "failed to search file path\n"); 1720 return -1; 1721 } 1722 1723 load_fw: 1724 PMD_INIT_LOG(DEBUG, "DDP package name: %s", pkg_file); 1725 1726 err = ice_copy_and_init_pkg(hw, buf, bufsz); 1727 if (err) { 1728 PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d\n", err); 1729 goto out; 1730 } 1731 1732 /* store the loaded pkg type info */ 1733 adapter->active_pkg_type = ice_load_pkg_type(hw); 1734 1735 out: 1736 free(buf); 1737 return err; 1738 } 1739 1740 static void 1741 ice_base_queue_get(struct ice_pf *pf) 1742 { 1743 uint32_t reg; 1744 struct ice_hw *hw = ICE_PF_TO_HW(pf); 1745 1746 reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC); 1747 if (reg & PFLAN_RX_QALLOC_VALID_M) { 1748 pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M; 1749 } else { 1750 PMD_INIT_LOG(WARNING, "Failed to get Rx base queue" 1751 " index"); 1752 } 1753 } 1754 1755 static int 1756 parse_bool(const char *key, const char *value, void *args) 1757 { 1758 int *i = (int *)args; 1759 char *end; 1760 int num; 1761 1762 num = strtoul(value, &end, 10); 1763 1764 if (num != 0 && num != 1) { 1765 PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", " 1766 "value must be 0 or 1", 1767 value, key); 1768 return -1; 1769 } 1770 1771 *i = num; 1772 return 0; 1773 } 1774 1775 static int 1776 parse_u64(const char *key, const char *value, void *args) 1777 { 1778 u64 *num = (u64 *)args; 1779 u64 tmp; 1780 1781 errno = 0; 1782 tmp = strtoull(value, NULL, 16); 1783 if (errno) { 1784 PMD_DRV_LOG(WARNING, "%s: \"%s\" is not a valid u64", 1785 key, value); 1786 return -1; 1787 } 1788 1789 *num = tmp; 1790 1791 return 0; 1792 } 1793 1794 static int 1795 lookup_pps_type(const char *pps_name) 1796 { 1797 static struct { 1798 const char *name; 1799 enum pps_type type; 1800 } pps_type_map[] = { 1801 { "pin", PPS_PIN }, 1802 }; 1803 1804 uint32_t i; 1805 1806 for (i = 0; i < RTE_DIM(pps_type_map); i++) { 1807 if (strcmp(pps_name, pps_type_map[i].name) == 0) 1808 return pps_type_map[i].type; 1809 } 1810 1811 return -1; 1812 } 1813 1814 static int 1815 parse_pin_set(const char *input, int pps_type, struct ice_devargs *devargs) 1816 { 1817 const char *str = input; 1818 char *end = NULL; 1819 uint32_t idx; 1820 1821 while (isblank(*str)) 1822 str++; 1823 1824 if (!isdigit(*str)) 1825 
return -1; 1826 1827 if (pps_type == PPS_PIN) { 1828 idx = strtoul(str, &end, 10); 1829 if (end == NULL || idx >= ICE_MAX_PIN_NUM) 1830 return -1; 1831 1832 devargs->pin_idx = idx; 1833 devargs->pps_out_ena = 1; 1834 } 1835 1836 while (isblank(*end)) 1837 end++; 1838 1839 if (*end != ']') 1840 return -1; 1841 1842 return 0; 1843 } 1844 1845 static int 1846 parse_pps_out_parameter(const char *pins, struct ice_devargs *devargs) 1847 { 1848 const char *pin_start; 1849 uint32_t idx; 1850 int pps_type; 1851 char pps_name[32]; 1852 1853 while (isblank(*pins)) 1854 pins++; 1855 1856 pins++; 1857 while (isblank(*pins)) 1858 pins++; 1859 if (*pins == '\0') 1860 return -1; 1861 1862 for (idx = 0; ; idx++) { 1863 if (isblank(pins[idx]) || 1864 pins[idx] == ':' || 1865 pins[idx] == '\0') 1866 break; 1867 1868 pps_name[idx] = pins[idx]; 1869 } 1870 pps_name[idx] = '\0'; 1871 pps_type = lookup_pps_type(pps_name); 1872 if (pps_type < 0) 1873 return -1; 1874 1875 pins += idx; 1876 1877 pins += strcspn(pins, ":"); 1878 if (*pins++ != ':') 1879 return -1; 1880 while (isblank(*pins)) 1881 pins++; 1882 1883 pin_start = pins; 1884 1885 while (isblank(*pins)) 1886 pins++; 1887 1888 if (parse_pin_set(pin_start, pps_type, devargs) < 0) 1889 return -1; 1890 1891 return 0; 1892 } 1893 1894 static int 1895 handle_pps_out_arg(__rte_unused const char *key, const char *value, 1896 void *extra_args) 1897 { 1898 struct ice_devargs *devargs = extra_args; 1899 1900 if (value == NULL || extra_args == NULL) 1901 return -EINVAL; 1902 1903 if (parse_pps_out_parameter(value, devargs) < 0) { 1904 PMD_DRV_LOG(ERR, 1905 "The GPIO pin parameter is wrong : '%s'", 1906 value); 1907 return -1; 1908 } 1909 1910 return 0; 1911 } 1912 1913 static int ice_parse_devargs(struct rte_eth_dev *dev) 1914 { 1915 struct ice_adapter *ad = 1916 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 1917 struct rte_devargs *devargs = dev->device->devargs; 1918 struct rte_kvargs *kvlist; 1919 int ret; 1920 1921 if (devargs == NULL) 1922 return 0; 1923 1924 kvlist = rte_kvargs_parse(devargs->args, ice_valid_args); 1925 if (kvlist == NULL) { 1926 PMD_INIT_LOG(ERR, "Invalid kvargs key\n"); 1927 return -EINVAL; 1928 } 1929 1930 ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE; 1931 memset(ad->devargs.proto_xtr, PROTO_XTR_NONE, 1932 sizeof(ad->devargs.proto_xtr)); 1933 1934 ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG, 1935 &handle_proto_xtr_arg, &ad->devargs); 1936 if (ret) 1937 goto bail; 1938 1939 ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG, 1940 &parse_bool, &ad->devargs.safe_mode_support); 1941 if (ret) 1942 goto bail; 1943 1944 ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG, 1945 &parse_bool, &ad->devargs.pipe_mode_support); 1946 if (ret) 1947 goto bail; 1948 1949 ret = rte_kvargs_process(kvlist, ICE_HW_DEBUG_MASK_ARG, 1950 &parse_u64, &ad->hw.debug_mask); 1951 if (ret) 1952 goto bail; 1953 1954 ret = rte_kvargs_process(kvlist, ICE_ONE_PPS_OUT_ARG, 1955 &handle_pps_out_arg, &ad->devargs); 1956 if (ret) 1957 goto bail; 1958 1959 bail: 1960 rte_kvargs_free(kvlist); 1961 return ret; 1962 } 1963 1964 /* Forward LLDP packets to default VSI by set switch rules */ 1965 static int 1966 ice_vsi_config_sw_lldp(struct ice_vsi *vsi, bool on) 1967 { 1968 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 1969 struct ice_fltr_list_entry *s_list_itr = NULL; 1970 struct LIST_HEAD_TYPE list_head; 1971 int ret = 0; 1972 1973 INIT_LIST_HEAD(&list_head); 1974 1975 s_list_itr = (struct ice_fltr_list_entry *) 1976 ice_malloc(hw, sizeof(*s_list_itr)); 
1977 if (!s_list_itr) 1978 return -ENOMEM; 1979 s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE; 1980 s_list_itr->fltr_info.vsi_handle = vsi->idx; 1981 s_list_itr->fltr_info.l_data.ethertype_mac.ethertype = 1982 RTE_ETHER_TYPE_LLDP; 1983 s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI; 1984 s_list_itr->fltr_info.flag = ICE_FLTR_RX; 1985 s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT; 1986 LIST_ADD(&s_list_itr->list_entry, &list_head); 1987 if (on) 1988 ret = ice_add_eth_mac(hw, &list_head); 1989 else 1990 ret = ice_remove_eth_mac(hw, &list_head); 1991 1992 rte_free(s_list_itr); 1993 return ret; 1994 } 1995 1996 static enum ice_status 1997 ice_get_hw_res(struct ice_hw *hw, uint16_t res_type, 1998 uint16_t num, uint16_t desc_id, 1999 uint16_t *prof_buf, uint16_t *num_prof) 2000 { 2001 struct ice_aqc_res_elem *resp_buf; 2002 int ret; 2003 uint16_t buf_len; 2004 bool res_shared = 1; 2005 struct ice_aq_desc aq_desc; 2006 struct ice_sq_cd *cd = NULL; 2007 struct ice_aqc_get_allocd_res_desc *cmd = 2008 &aq_desc.params.get_res_desc; 2009 2010 buf_len = sizeof(*resp_buf) * num; 2011 resp_buf = ice_malloc(hw, buf_len); 2012 if (!resp_buf) 2013 return -ENOMEM; 2014 2015 ice_fill_dflt_direct_cmd_desc(&aq_desc, 2016 ice_aqc_opc_get_allocd_res_desc); 2017 2018 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) & 2019 ICE_AQC_RES_TYPE_M) | (res_shared ? 2020 ICE_AQC_RES_TYPE_FLAG_SHARED : 0)); 2021 cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id); 2022 2023 ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd); 2024 if (!ret) 2025 *num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc); 2026 else 2027 goto exit; 2028 2029 ice_memcpy(prof_buf, resp_buf, sizeof(*resp_buf) * 2030 (*num_prof), ICE_NONDMA_TO_NONDMA); 2031 2032 exit: 2033 rte_free(resp_buf); 2034 return ret; 2035 } 2036 static int 2037 ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type) 2038 { 2039 int ret; 2040 uint16_t prof_id; 2041 uint16_t prof_buf[ICE_MAX_RES_DESC_NUM]; 2042 uint16_t first_desc = 1; 2043 uint16_t num_prof = 0; 2044 2045 ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM, 2046 first_desc, prof_buf, &num_prof); 2047 if (ret) { 2048 PMD_INIT_LOG(ERR, "Failed to get fxp resource"); 2049 return ret; 2050 } 2051 2052 for (prof_id = 0; prof_id < num_prof; prof_id++) { 2053 ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]); 2054 if (ret) { 2055 PMD_INIT_LOG(ERR, "Failed to free fxp resource"); 2056 return ret; 2057 } 2058 } 2059 return 0; 2060 } 2061 2062 static int 2063 ice_reset_fxp_resource(struct ice_hw *hw) 2064 { 2065 int ret; 2066 2067 ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID); 2068 if (ret) { 2069 PMD_INIT_LOG(ERR, "Failed to clearup fdir resource"); 2070 return ret; 2071 } 2072 2073 ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID); 2074 if (ret) { 2075 PMD_INIT_LOG(ERR, "Failed to clearup rss resource"); 2076 return ret; 2077 } 2078 2079 return 0; 2080 } 2081 2082 static void 2083 ice_rss_ctx_init(struct ice_pf *pf) 2084 { 2085 memset(&pf->hash_ctx, 0, sizeof(pf->hash_ctx)); 2086 } 2087 2088 static uint64_t 2089 ice_get_supported_rxdid(struct ice_hw *hw) 2090 { 2091 uint64_t supported_rxdid = 0; /* bitmap for supported RXDID */ 2092 uint32_t regval; 2093 int i; 2094 2095 supported_rxdid |= BIT(ICE_RXDID_LEGACY_1); 2096 2097 for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) { 2098 regval = ICE_READ_REG(hw, GLFLXP_RXDID_FLAGS(i, 0)); 2099 if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) 2100 & 
		     GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
			supported_rxdid |= BIT(i);
	}
	return supported_rxdid;
}

static int
ice_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_vsi *vsi;
	int ret;
#ifndef RTE_EXEC_ENV_WINDOWS
	off_t pos;
	uint32_t dsn_low, dsn_high;
	uint64_t dsn;
	bool use_dsn;
#endif

	dev->dev_ops = &ice_eth_dev_ops;
	dev->rx_queue_count = ice_rx_queue_count;
	dev->rx_descriptor_status = ice_rx_descriptor_status;
	dev->tx_descriptor_status = ice_tx_descriptor_status;
	dev->rx_pkt_burst = ice_recv_pkts;
	dev->tx_pkt_burst = ice_xmit_pkts;
	dev->tx_pkt_prepare = ice_prep_pkts;

	/* For secondary processes, we don't initialise any further as the
	 * primary has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ice_set_rx_function(dev);
		ice_set_tx_function(dev);
		return 0;
	}

	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	ice_set_default_ptype_table(dev);
	pci_dev = RTE_DEV_TO_PCI(dev->device);
	intr_handle = &pci_dev->intr_handle;

	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->dev_data = dev->data;
	hw->back = pf->adapter;
	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	ret = ice_parse_devargs(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to parse devargs");
		return -EINVAL;
	}

	ice_init_controlq_parameter(hw);

	ret = ice_init_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize HW");
		return -EINVAL;
	}

#ifndef RTE_EXEC_ENV_WINDOWS
	use_dsn = false;
	dsn = 0;
	pos = rte_pci_find_ext_capability(pci_dev, RTE_PCI_EXT_CAP_ID_DSN);
	if (pos) {
		if (rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4) < 0 ||
		    rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8) < 0) {
			PMD_INIT_LOG(ERR, "Failed to read pci config space");
		} else {
			use_dsn = true;
			dsn = (uint64_t)dsn_high << 32 | dsn_low;
		}
	} else {
		PMD_INIT_LOG(ERR, "Failed to read device serial number");
	}

	ret = ice_load_pkg(pf->adapter, use_dsn, dsn);
	if (ret == 0) {
		ret = ice_init_hw_tbls(hw);
		if (ret) {
			PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d", ret);
			rte_free(hw->pkg_copy);
		}
	}

	if (ret) {
		if (ad->devargs.safe_mode_support == 0) {
			PMD_INIT_LOG(ERR, "Failed to load the DDP package, "
				     "use safe-mode-support=1 to enter Safe Mode");
			goto err_init_fw;
		}

		PMD_INIT_LOG(WARNING, "Failed to load the DDP package, "
			     "entering Safe Mode");
		ad->is_safe_mode = 1;
	}
#endif

	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		     hw->api_maj_ver, hw->api_min_ver);

	ice_pf_sw_init(dev);
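	/* From here on, any failure unwinds through the err_* labels at the
	 * end of this function, releasing resources in reverse order of
	 * their initialization.
	 */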
ret = ice_init_mac_address(dev); 2216 if (ret) { 2217 PMD_INIT_LOG(ERR, "Failed to initialize mac address"); 2218 goto err_init_mac; 2219 } 2220 2221 ret = ice_res_pool_init(&pf->msix_pool, 1, 2222 hw->func_caps.common_cap.num_msix_vectors - 1); 2223 if (ret) { 2224 PMD_INIT_LOG(ERR, "Failed to init MSIX pool"); 2225 goto err_msix_pool_init; 2226 } 2227 2228 ret = ice_pf_setup(pf); 2229 if (ret) { 2230 PMD_INIT_LOG(ERR, "Failed to setup PF"); 2231 goto err_pf_setup; 2232 } 2233 2234 ret = ice_send_driver_ver(hw); 2235 if (ret) { 2236 PMD_INIT_LOG(ERR, "Failed to send driver version"); 2237 goto err_pf_setup; 2238 } 2239 2240 vsi = pf->main_vsi; 2241 2242 ret = ice_aq_stop_lldp(hw, true, false, NULL); 2243 if (ret != ICE_SUCCESS) 2244 PMD_INIT_LOG(DEBUG, "lldp has already stopped\n"); 2245 ret = ice_init_dcb(hw, true); 2246 if (ret != ICE_SUCCESS) 2247 PMD_INIT_LOG(DEBUG, "Failed to init DCB\n"); 2248 /* Forward LLDP packets to default VSI */ 2249 ret = ice_vsi_config_sw_lldp(vsi, true); 2250 if (ret != ICE_SUCCESS) 2251 PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n"); 2252 /* register callback func to eal lib */ 2253 rte_intr_callback_register(intr_handle, 2254 ice_interrupt_handler, dev); 2255 2256 ice_pf_enable_irq0(hw); 2257 2258 /* enable uio intr after callback register */ 2259 rte_intr_enable(intr_handle); 2260 2261 /* get base queue pairs index in the device */ 2262 ice_base_queue_get(pf); 2263 2264 /* Initialize RSS context for gtpu_eh */ 2265 ice_rss_ctx_init(pf); 2266 2267 if (!ad->is_safe_mode) { 2268 ret = ice_flow_init(ad); 2269 if (ret) { 2270 PMD_INIT_LOG(ERR, "Failed to initialize flow"); 2271 goto err_flow_init; 2272 } 2273 } 2274 2275 ret = ice_reset_fxp_resource(hw); 2276 if (ret) { 2277 PMD_INIT_LOG(ERR, "Failed to reset fxp resource"); 2278 goto err_flow_init; 2279 } 2280 2281 pf->supported_rxdid = ice_get_supported_rxdid(hw); 2282 2283 return 0; 2284 2285 err_flow_init: 2286 ice_flow_uninit(ad); 2287 rte_intr_disable(intr_handle); 2288 ice_pf_disable_irq0(hw); 2289 rte_intr_callback_unregister(intr_handle, 2290 ice_interrupt_handler, dev); 2291 err_pf_setup: 2292 ice_res_pool_destroy(&pf->msix_pool); 2293 err_msix_pool_init: 2294 rte_free(dev->data->mac_addrs); 2295 dev->data->mac_addrs = NULL; 2296 err_init_mac: 2297 rte_free(pf->proto_xtr); 2298 #ifndef RTE_EXEC_ENV_WINDOWS 2299 err_init_fw: 2300 #endif 2301 ice_deinit_hw(hw); 2302 2303 return ret; 2304 } 2305 2306 int 2307 ice_release_vsi(struct ice_vsi *vsi) 2308 { 2309 struct ice_hw *hw; 2310 struct ice_vsi_ctx vsi_ctx; 2311 enum ice_status ret; 2312 int error = 0; 2313 2314 if (!vsi) 2315 return error; 2316 2317 hw = ICE_VSI_TO_HW(vsi); 2318 2319 ice_remove_all_mac_vlan_filters(vsi); 2320 2321 memset(&vsi_ctx, 0, sizeof(vsi_ctx)); 2322 2323 vsi_ctx.vsi_num = vsi->vsi_id; 2324 vsi_ctx.info = vsi->info; 2325 ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL); 2326 if (ret != ICE_SUCCESS) { 2327 PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id); 2328 error = -1; 2329 } 2330 2331 rte_free(vsi->rss_lut); 2332 rte_free(vsi->rss_key); 2333 rte_free(vsi); 2334 return error; 2335 } 2336 2337 void 2338 ice_vsi_disable_queues_intr(struct ice_vsi *vsi) 2339 { 2340 struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id]; 2341 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); 2342 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2343 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 2344 uint16_t msix_intr, i; 2345 2346 /* disable interrupt and also clear all the exist config */ 2347 
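	/* Writing 0 to QINT_RQCTL/QINT_TQCTL clears the cause-enable bit and
	 * detaches each queue from its MSI-X vector; the vectors themselves
	 * are then switched to write-back-on-ITR below.
	 */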
	for (i = 0; i < vsi->nb_qps; i++) {
		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
		rte_wmb();
	}

	if (rte_intr_allow_others(intr_handle))
		/* vfio-pci */
		for (i = 0; i < vsi->nb_msix; i++) {
			msix_intr = vsi->msix_intr + i;
			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
				      GLINT_DYN_CTL_WB_ON_ITR_M);
		}
	else
		/* igb_uio */
		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
}

static int
ice_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *main_vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint16_t i;

	/* avoid stopping again */
	if (pf->adapter_stopped)
		return 0;

	/* stop and clear all Rx queues */
	for (i = 0; i < data->nb_rx_queues; i++)
		ice_rx_queue_stop(dev, i);

	/* stop and clear all Tx queues */
	for (i = 0; i < data->nb_tx_queues; i++)
		ice_tx_queue_stop(dev, i);

	/* disable all queue interrupts */
	ice_vsi_disable_queues_intr(main_vsi);

	if (pf->init_link_up)
		ice_dev_set_link_up(dev);
	else
		ice_dev_set_link_down(dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	pf->adapter_stopped = true;
	dev->data->dev_started = 0;

	return 0;
}

static int
ice_dev_close(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	int ret;
	uint32_t val;
	uint8_t timer = hw->func_caps.ts_func_info.tmr_index_owned;
	uint32_t pin_idx = ad->devargs.pin_idx;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Stopping the port brings the link down, which triggers a link
	 * event. Disable irq0 first so that the interrupt service thread
	 * does not crash while port info and other resources are being
	 * freed.
2429 */ 2430 ice_pf_disable_irq0(hw); 2431 2432 ret = ice_dev_stop(dev); 2433 2434 if (!ad->is_safe_mode) 2435 ice_flow_uninit(ad); 2436 2437 /* release all queue resource */ 2438 ice_free_queues(dev); 2439 2440 ice_res_pool_destroy(&pf->msix_pool); 2441 ice_release_vsi(pf->main_vsi); 2442 ice_sched_cleanup_all(hw); 2443 ice_free_hw_tbls(hw); 2444 rte_free(hw->port_info); 2445 hw->port_info = NULL; 2446 ice_shutdown_all_ctrlq(hw); 2447 rte_free(pf->proto_xtr); 2448 pf->proto_xtr = NULL; 2449 2450 if (ad->devargs.pps_out_ena) { 2451 ICE_WRITE_REG(hw, GLTSYN_AUX_OUT(pin_idx, timer), 0); 2452 ICE_WRITE_REG(hw, GLTSYN_CLKO(pin_idx, timer), 0); 2453 ICE_WRITE_REG(hw, GLTSYN_TGT_L(pin_idx, timer), 0); 2454 ICE_WRITE_REG(hw, GLTSYN_TGT_H(pin_idx, timer), 0); 2455 2456 val = GLGEN_GPIO_CTL_PIN_DIR_M; 2457 ICE_WRITE_REG(hw, GLGEN_GPIO_CTL(pin_idx), val); 2458 } 2459 2460 /* disable uio intr before callback unregister */ 2461 rte_intr_disable(intr_handle); 2462 2463 /* unregister callback func from eal lib */ 2464 rte_intr_callback_unregister(intr_handle, 2465 ice_interrupt_handler, dev); 2466 2467 return ret; 2468 } 2469 2470 static int 2471 ice_dev_uninit(struct rte_eth_dev *dev) 2472 { 2473 ice_dev_close(dev); 2474 2475 return 0; 2476 } 2477 2478 static bool 2479 is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg) 2480 { 2481 return (cfg->hash_flds != 0 && cfg->addl_hdrs != 0) ? true : false; 2482 } 2483 2484 static void 2485 hash_cfg_reset(struct ice_rss_hash_cfg *cfg) 2486 { 2487 cfg->hash_flds = 0; 2488 cfg->addl_hdrs = 0; 2489 cfg->symm = 0; 2490 cfg->hdr_type = ICE_RSS_OUTER_HEADERS; 2491 } 2492 2493 static int 2494 ice_hash_moveout(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg) 2495 { 2496 enum ice_status status = ICE_SUCCESS; 2497 struct ice_hw *hw = ICE_PF_TO_HW(pf); 2498 struct ice_vsi *vsi = pf->main_vsi; 2499 2500 if (!is_hash_cfg_valid(cfg)) 2501 return -ENOENT; 2502 2503 status = ice_rem_rss_cfg(hw, vsi->idx, cfg); 2504 if (status && status != ICE_ERR_DOES_NOT_EXIST) { 2505 PMD_DRV_LOG(ERR, 2506 "ice_rem_rss_cfg failed for VSI:%d, error:%d\n", 2507 vsi->idx, status); 2508 return -EBUSY; 2509 } 2510 2511 return 0; 2512 } 2513 2514 static int 2515 ice_hash_moveback(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg) 2516 { 2517 enum ice_status status = ICE_SUCCESS; 2518 struct ice_hw *hw = ICE_PF_TO_HW(pf); 2519 struct ice_vsi *vsi = pf->main_vsi; 2520 2521 if (!is_hash_cfg_valid(cfg)) 2522 return -ENOENT; 2523 2524 status = ice_add_rss_cfg(hw, vsi->idx, cfg); 2525 if (status) { 2526 PMD_DRV_LOG(ERR, 2527 "ice_add_rss_cfg failed for VSI:%d, error:%d\n", 2528 vsi->idx, status); 2529 return -EBUSY; 2530 } 2531 2532 return 0; 2533 } 2534 2535 static int 2536 ice_hash_remove(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg) 2537 { 2538 int ret; 2539 2540 ret = ice_hash_moveout(pf, cfg); 2541 if (ret && (ret != -ENOENT)) 2542 return ret; 2543 2544 hash_cfg_reset(cfg); 2545 2546 return 0; 2547 } 2548 2549 static int 2550 ice_add_rss_cfg_pre_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx, 2551 u8 ctx_idx) 2552 { 2553 int ret; 2554 2555 switch (ctx_idx) { 2556 case ICE_HASH_GTPU_CTX_EH_IP: 2557 ret = ice_hash_remove(pf, 2558 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); 2559 if (ret && (ret != -ENOENT)) 2560 return ret; 2561 2562 ret = ice_hash_remove(pf, 2563 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); 2564 if (ret && (ret != -ENOENT)) 2565 return ret; 2566 2567 ret = ice_hash_remove(pf, 2568 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]); 2569 if (ret && (ret != -ENOENT)) 2570 return ret; 2571 2572 ret = 
ice_hash_remove(pf, 2573 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]); 2574 if (ret && (ret != -ENOENT)) 2575 return ret; 2576 2577 ret = ice_hash_remove(pf, 2578 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]); 2579 if (ret && (ret != -ENOENT)) 2580 return ret; 2581 2582 ret = ice_hash_remove(pf, 2583 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]); 2584 if (ret && (ret != -ENOENT)) 2585 return ret; 2586 2587 ret = ice_hash_remove(pf, 2588 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]); 2589 if (ret && (ret != -ENOENT)) 2590 return ret; 2591 2592 ret = ice_hash_remove(pf, 2593 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]); 2594 if (ret && (ret != -ENOENT)) 2595 return ret; 2596 2597 break; 2598 case ICE_HASH_GTPU_CTX_EH_IP_UDP: 2599 ret = ice_hash_remove(pf, 2600 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]); 2601 if (ret && (ret != -ENOENT)) 2602 return ret; 2603 2604 ret = ice_hash_remove(pf, 2605 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]); 2606 if (ret && (ret != -ENOENT)) 2607 return ret; 2608 2609 ret = ice_hash_moveout(pf, 2610 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]); 2611 if (ret && (ret != -ENOENT)) 2612 return ret; 2613 2614 ret = ice_hash_moveout(pf, 2615 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]); 2616 if (ret && (ret != -ENOENT)) 2617 return ret; 2618 2619 ret = ice_hash_moveout(pf, 2620 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]); 2621 if (ret && (ret != -ENOENT)) 2622 return ret; 2623 2624 ret = ice_hash_moveout(pf, 2625 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]); 2626 if (ret && (ret != -ENOENT)) 2627 return ret; 2628 2629 break; 2630 case ICE_HASH_GTPU_CTX_EH_IP_TCP: 2631 ret = ice_hash_remove(pf, 2632 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]); 2633 if (ret && (ret != -ENOENT)) 2634 return ret; 2635 2636 ret = ice_hash_remove(pf, 2637 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]); 2638 if (ret && (ret != -ENOENT)) 2639 return ret; 2640 2641 ret = ice_hash_moveout(pf, 2642 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]); 2643 if (ret && (ret != -ENOENT)) 2644 return ret; 2645 2646 ret = ice_hash_moveout(pf, 2647 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]); 2648 if (ret && (ret != -ENOENT)) 2649 return ret; 2650 2651 ret = ice_hash_moveout(pf, 2652 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]); 2653 if (ret && (ret != -ENOENT)) 2654 return ret; 2655 2656 ret = ice_hash_moveout(pf, 2657 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]); 2658 if (ret && (ret != -ENOENT)) 2659 return ret; 2660 2661 break; 2662 case ICE_HASH_GTPU_CTX_UP_IP: 2663 ret = ice_hash_remove(pf, 2664 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]); 2665 if (ret && (ret != -ENOENT)) 2666 return ret; 2667 2668 ret = ice_hash_remove(pf, 2669 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]); 2670 if (ret && (ret != -ENOENT)) 2671 return ret; 2672 2673 ret = ice_hash_moveout(pf, 2674 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]); 2675 if (ret && (ret != -ENOENT)) 2676 return ret; 2677 2678 ret = ice_hash_moveout(pf, 2679 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); 2680 if (ret && (ret != -ENOENT)) 2681 return ret; 2682 2683 ret = ice_hash_moveout(pf, 2684 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); 2685 if (ret && (ret != -ENOENT)) 2686 return ret; 2687 2688 break; 2689 case ICE_HASH_GTPU_CTX_UP_IP_UDP: 2690 case ICE_HASH_GTPU_CTX_UP_IP_TCP: 2691 ret = ice_hash_moveout(pf, 2692 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]); 2693 if (ret && (ret != -ENOENT)) 2694 return ret; 2695 2696 ret = ice_hash_moveout(pf, 2697 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); 2698 if (ret && (ret != -ENOENT)) 2699 return ret; 2700 2701 ret = ice_hash_moveout(pf, 2702 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); 2703 if (ret && (ret != -ENOENT)) 2704 return ret; 2705 2706 break; 
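	/* The downlink (DW) cases below mirror the uplink (UP) handling
	 * above: the more specific UDP/TCP contexts are removed and the
	 * extension-header (EH) contexts are temporarily moved out before
	 * the new configuration is applied.
	 */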
2707 case ICE_HASH_GTPU_CTX_DW_IP: 2708 ret = ice_hash_remove(pf, 2709 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]); 2710 if (ret && (ret != -ENOENT)) 2711 return ret; 2712 2713 ret = ice_hash_remove(pf, 2714 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]); 2715 if (ret && (ret != -ENOENT)) 2716 return ret; 2717 2718 ret = ice_hash_moveout(pf, 2719 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]); 2720 if (ret && (ret != -ENOENT)) 2721 return ret; 2722 2723 ret = ice_hash_moveout(pf, 2724 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); 2725 if (ret && (ret != -ENOENT)) 2726 return ret; 2727 2728 ret = ice_hash_moveout(pf, 2729 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); 2730 if (ret && (ret != -ENOENT)) 2731 return ret; 2732 2733 break; 2734 case ICE_HASH_GTPU_CTX_DW_IP_UDP: 2735 case ICE_HASH_GTPU_CTX_DW_IP_TCP: 2736 ret = ice_hash_moveout(pf, 2737 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]); 2738 if (ret && (ret != -ENOENT)) 2739 return ret; 2740 2741 ret = ice_hash_moveout(pf, 2742 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); 2743 if (ret && (ret != -ENOENT)) 2744 return ret; 2745 2746 ret = ice_hash_moveout(pf, 2747 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); 2748 if (ret && (ret != -ENOENT)) 2749 return ret; 2750 2751 break; 2752 default: 2753 break; 2754 } 2755 2756 return 0; 2757 } 2758 2759 static u8 calc_gtpu_ctx_idx(uint32_t hdr) 2760 { 2761 u8 eh_idx, ip_idx; 2762 2763 if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) 2764 eh_idx = 0; 2765 else if (hdr & ICE_FLOW_SEG_HDR_GTPU_UP) 2766 eh_idx = 1; 2767 else if (hdr & ICE_FLOW_SEG_HDR_GTPU_DWN) 2768 eh_idx = 2; 2769 else 2770 return ICE_HASH_GTPU_CTX_MAX; 2771 2772 ip_idx = 0; 2773 if (hdr & ICE_FLOW_SEG_HDR_UDP) 2774 ip_idx = 1; 2775 else if (hdr & ICE_FLOW_SEG_HDR_TCP) 2776 ip_idx = 2; 2777 2778 if (hdr & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)) 2779 return eh_idx * 3 + ip_idx; 2780 else 2781 return ICE_HASH_GTPU_CTX_MAX; 2782 } 2783 2784 static int 2785 ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr) 2786 { 2787 u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr); 2788 2789 if (hdr & ICE_FLOW_SEG_HDR_IPV4) 2790 return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu4, 2791 gtpu_ctx_idx); 2792 else if (hdr & ICE_FLOW_SEG_HDR_IPV6) 2793 return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu6, 2794 gtpu_ctx_idx); 2795 2796 return 0; 2797 } 2798 2799 static int 2800 ice_add_rss_cfg_post_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx, 2801 u8 ctx_idx, struct ice_rss_hash_cfg *cfg) 2802 { 2803 int ret; 2804 2805 if (ctx_idx < ICE_HASH_GTPU_CTX_MAX) 2806 ctx->ctx[ctx_idx] = *cfg; 2807 2808 switch (ctx_idx) { 2809 case ICE_HASH_GTPU_CTX_EH_IP: 2810 break; 2811 case ICE_HASH_GTPU_CTX_EH_IP_UDP: 2812 ret = ice_hash_moveback(pf, 2813 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]); 2814 if (ret && (ret != -ENOENT)) 2815 return ret; 2816 2817 ret = ice_hash_moveback(pf, 2818 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]); 2819 if (ret && (ret != -ENOENT)) 2820 return ret; 2821 2822 ret = ice_hash_moveback(pf, 2823 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]); 2824 if (ret && (ret != -ENOENT)) 2825 return ret; 2826 2827 ret = ice_hash_moveback(pf, 2828 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]); 2829 if (ret && (ret != -ENOENT)) 2830 return ret; 2831 2832 break; 2833 case ICE_HASH_GTPU_CTX_EH_IP_TCP: 2834 ret = ice_hash_moveback(pf, 2835 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]); 2836 if (ret && (ret != -ENOENT)) 2837 return ret; 2838 2839 ret = ice_hash_moveback(pf, 2840 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]); 2841 if (ret && (ret != -ENOENT)) 2842 return ret; 2843 2844 ret = ice_hash_moveback(pf, 2845 
&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]); 2846 if (ret && (ret != -ENOENT)) 2847 return ret; 2848 2849 ret = ice_hash_moveback(pf, 2850 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]); 2851 if (ret && (ret != -ENOENT)) 2852 return ret; 2853 2854 break; 2855 case ICE_HASH_GTPU_CTX_UP_IP: 2856 case ICE_HASH_GTPU_CTX_UP_IP_UDP: 2857 case ICE_HASH_GTPU_CTX_UP_IP_TCP: 2858 case ICE_HASH_GTPU_CTX_DW_IP: 2859 case ICE_HASH_GTPU_CTX_DW_IP_UDP: 2860 case ICE_HASH_GTPU_CTX_DW_IP_TCP: 2861 ret = ice_hash_moveback(pf, 2862 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]); 2863 if (ret && (ret != -ENOENT)) 2864 return ret; 2865 2866 ret = ice_hash_moveback(pf, 2867 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); 2868 if (ret && (ret != -ENOENT)) 2869 return ret; 2870 2871 ret = ice_hash_moveback(pf, 2872 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); 2873 if (ret && (ret != -ENOENT)) 2874 return ret; 2875 2876 break; 2877 default: 2878 break; 2879 } 2880 2881 return 0; 2882 } 2883 2884 static int 2885 ice_add_rss_cfg_post(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg) 2886 { 2887 u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(cfg->addl_hdrs); 2888 2889 if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) 2890 return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu4, 2891 gtpu_ctx_idx, cfg); 2892 else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) 2893 return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu6, 2894 gtpu_ctx_idx, cfg); 2895 2896 return 0; 2897 } 2898 2899 static void 2900 ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr) 2901 { 2902 u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr); 2903 2904 if (gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX) 2905 return; 2906 2907 if (hdr & ICE_FLOW_SEG_HDR_IPV4) 2908 hash_cfg_reset(&pf->hash_ctx.gtpu4.ctx[gtpu_ctx_idx]); 2909 else if (hdr & ICE_FLOW_SEG_HDR_IPV6) 2910 hash_cfg_reset(&pf->hash_ctx.gtpu6.ctx[gtpu_ctx_idx]); 2911 } 2912 2913 int 2914 ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, 2915 struct ice_rss_hash_cfg *cfg) 2916 { 2917 struct ice_hw *hw = ICE_PF_TO_HW(pf); 2918 int ret; 2919 2920 ret = ice_rem_rss_cfg(hw, vsi_id, cfg); 2921 if (ret && ret != ICE_ERR_DOES_NOT_EXIST) 2922 PMD_DRV_LOG(ERR, "remove rss cfg failed\n"); 2923 2924 ice_rem_rss_cfg_post(pf, cfg->addl_hdrs); 2925 2926 return 0; 2927 } 2928 2929 int 2930 ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, 2931 struct ice_rss_hash_cfg *cfg) 2932 { 2933 struct ice_hw *hw = ICE_PF_TO_HW(pf); 2934 int ret; 2935 2936 ret = ice_add_rss_cfg_pre(pf, cfg->addl_hdrs); 2937 if (ret) 2938 PMD_DRV_LOG(ERR, "add rss cfg pre failed\n"); 2939 2940 ret = ice_add_rss_cfg(hw, vsi_id, cfg); 2941 if (ret) 2942 PMD_DRV_LOG(ERR, "add rss cfg failed\n"); 2943 2944 ret = ice_add_rss_cfg_post(pf, cfg); 2945 if (ret) 2946 PMD_DRV_LOG(ERR, "add rss cfg post failed\n"); 2947 2948 return 0; 2949 } 2950 2951 static void 2952 ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf) 2953 { 2954 struct ice_hw *hw = ICE_PF_TO_HW(pf); 2955 struct ice_vsi *vsi = pf->main_vsi; 2956 struct ice_rss_hash_cfg cfg; 2957 int ret; 2958 2959 #define ICE_RSS_HF_ALL ( \ 2960 ETH_RSS_IPV4 | \ 2961 ETH_RSS_IPV6 | \ 2962 ETH_RSS_NONFRAG_IPV4_UDP | \ 2963 ETH_RSS_NONFRAG_IPV6_UDP | \ 2964 ETH_RSS_NONFRAG_IPV4_TCP | \ 2965 ETH_RSS_NONFRAG_IPV6_TCP | \ 2966 ETH_RSS_NONFRAG_IPV4_SCTP | \ 2967 ETH_RSS_NONFRAG_IPV6_SCTP) 2968 2969 ret = ice_rem_vsi_rss_cfg(hw, vsi->idx); 2970 if (ret) 2971 PMD_DRV_LOG(ERR, "%s Remove rss vsi fail %d", 2972 __func__, ret); 2973 2974 cfg.symm = 0; 2975 cfg.hdr_type = ICE_RSS_OUTER_HEADERS; 2976 /* Configure RSS for IPv4 with src/dst addr as input set */ 2977 if 
(rss_hf & ETH_RSS_IPV4) { 2978 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER; 2979 cfg.hash_flds = ICE_FLOW_HASH_IPV4; 2980 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 2981 if (ret) 2982 PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d", 2983 __func__, ret); 2984 } 2985 2986 /* Configure RSS for IPv6 with src/dst addr as input set */ 2987 if (rss_hf & ETH_RSS_IPV6) { 2988 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER; 2989 cfg.hash_flds = ICE_FLOW_HASH_IPV6; 2990 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 2991 if (ret) 2992 PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d", 2993 __func__, ret); 2994 } 2995 2996 /* Configure RSS for udp4 with src/dst addr and port as input set */ 2997 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { 2998 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 | 2999 ICE_FLOW_SEG_HDR_IPV_OTHER; 3000 cfg.hash_flds = ICE_HASH_UDP_IPV4; 3001 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3002 if (ret) 3003 PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d", 3004 __func__, ret); 3005 } 3006 3007 /* Configure RSS for udp6 with src/dst addr and port as input set */ 3008 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) { 3009 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 | 3010 ICE_FLOW_SEG_HDR_IPV_OTHER; 3011 cfg.hash_flds = ICE_HASH_UDP_IPV6; 3012 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3013 if (ret) 3014 PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d", 3015 __func__, ret); 3016 } 3017 3018 /* Configure RSS for tcp4 with src/dst addr and port as input set */ 3019 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { 3020 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 | 3021 ICE_FLOW_SEG_HDR_IPV_OTHER; 3022 cfg.hash_flds = ICE_HASH_TCP_IPV4; 3023 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3024 if (ret) 3025 PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d", 3026 __func__, ret); 3027 } 3028 3029 /* Configure RSS for tcp6 with src/dst addr and port as input set */ 3030 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) { 3031 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 | 3032 ICE_FLOW_SEG_HDR_IPV_OTHER; 3033 cfg.hash_flds = ICE_HASH_TCP_IPV6; 3034 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3035 if (ret) 3036 PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d", 3037 __func__, ret); 3038 } 3039 3040 /* Configure RSS for sctp4 with src/dst addr and port as input set */ 3041 if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) { 3042 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 | 3043 ICE_FLOW_SEG_HDR_IPV_OTHER; 3044 cfg.hash_flds = ICE_HASH_SCTP_IPV4; 3045 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3046 if (ret) 3047 PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d", 3048 __func__, ret); 3049 } 3050 3051 /* Configure RSS for sctp6 with src/dst addr and port as input set */ 3052 if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) { 3053 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 | 3054 ICE_FLOW_SEG_HDR_IPV_OTHER; 3055 cfg.hash_flds = ICE_HASH_SCTP_IPV6; 3056 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3057 if (ret) 3058 PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d", 3059 __func__, ret); 3060 } 3061 3062 if (rss_hf & ETH_RSS_IPV4) { 3063 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 | 3064 ICE_FLOW_SEG_HDR_IPV_OTHER; 3065 cfg.hash_flds = ICE_FLOW_HASH_IPV4; 3066 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3067 if (ret) 3068 PMD_DRV_LOG(ERR, "%s PPPoE_IPV4 rss flow fail %d", 3069 __func__, ret); 3070 } 3071 3072 if (rss_hf & ETH_RSS_IPV6) { 3073 cfg.addl_hdrs = 
ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 | 3074 ICE_FLOW_SEG_HDR_IPV_OTHER; 3075 cfg.hash_flds = ICE_FLOW_HASH_IPV6; 3076 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3077 if (ret) 3078 PMD_DRV_LOG(ERR, "%s PPPoE_IPV6 rss flow fail %d", 3079 __func__, ret); 3080 } 3081 3082 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { 3083 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP | 3084 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER; 3085 cfg.hash_flds = ICE_HASH_UDP_IPV4; 3086 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3087 if (ret) 3088 PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow fail %d", 3089 __func__, ret); 3090 } 3091 3092 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) { 3093 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP | 3094 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER; 3095 cfg.hash_flds = ICE_HASH_UDP_IPV6; 3096 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3097 if (ret) 3098 PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d", 3099 __func__, ret); 3100 } 3101 3102 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { 3103 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP | 3104 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER; 3105 cfg.hash_flds = ICE_HASH_TCP_IPV4; 3106 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3107 if (ret) 3108 PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d", 3109 __func__, ret); 3110 } 3111 3112 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) { 3113 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP | 3114 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER; 3115 cfg.hash_flds = ICE_HASH_TCP_IPV6; 3116 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3117 if (ret) 3118 PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d", 3119 __func__, ret); 3120 } 3121 3122 pf->rss_hf = rss_hf & ICE_RSS_HF_ALL; 3123 } 3124 3125 static void 3126 ice_get_default_rss_key(uint8_t *rss_key, uint32_t rss_key_size) 3127 { 3128 static struct ice_aqc_get_set_rss_keys default_key; 3129 static bool default_key_done; 3130 uint8_t *key = (uint8_t *)&default_key; 3131 size_t i; 3132 3133 if (rss_key_size > sizeof(default_key)) { 3134 PMD_DRV_LOG(WARNING, 3135 "requested size %u is larger than default %zu, " 3136 "only %zu bytes are gotten for key\n", 3137 rss_key_size, sizeof(default_key), 3138 sizeof(default_key)); 3139 } 3140 3141 if (!default_key_done) { 3142 /* Calculate the default hash key */ 3143 for (i = 0; i < sizeof(default_key); i++) 3144 key[i] = (uint8_t)rte_rand(); 3145 default_key_done = true; 3146 } 3147 rte_memcpy(rss_key, key, RTE_MIN(rss_key_size, sizeof(default_key))); 3148 } 3149 3150 static int ice_init_rss(struct ice_pf *pf) 3151 { 3152 struct ice_hw *hw = ICE_PF_TO_HW(pf); 3153 struct ice_vsi *vsi = pf->main_vsi; 3154 struct rte_eth_dev_data *dev_data = pf->dev_data; 3155 struct ice_aq_get_set_rss_lut_params lut_params; 3156 struct rte_eth_rss_conf *rss_conf; 3157 struct ice_aqc_get_set_rss_keys key; 3158 uint16_t i, nb_q; 3159 int ret = 0; 3160 bool is_safe_mode = pf->adapter->is_safe_mode; 3161 uint32_t reg; 3162 3163 rss_conf = &dev_data->dev_conf.rx_adv_conf.rss_conf; 3164 nb_q = dev_data->nb_rx_queues; 3165 vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE; 3166 vsi->rss_lut_size = pf->hash_lut_size; 3167 3168 if (nb_q == 0) { 3169 PMD_DRV_LOG(WARNING, 3170 "RSS is not supported as rx queues number is zero\n"); 3171 return 0; 3172 } 3173 3174 if (is_safe_mode) { 3175 PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n"); 3176 return 0; 3177 } 3178 3179 if (!vsi->rss_key) { 3180 
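		/* Software copies of the RSS key and LUT live in the VSI.
		 * Their sizes were taken above from the AQ key size and the
		 * PF hash LUT size; both copies are pushed to hardware via
		 * admin queue commands further down.
		 */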
		vsi->rss_key = rte_zmalloc(NULL,
					   vsi->rss_key_size, 0);
		if (vsi->rss_key == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
			return -ENOMEM;
		}
	}
	if (!vsi->rss_lut) {
		vsi->rss_lut = rte_zmalloc(NULL,
					   vsi->rss_lut_size, 0);
		if (vsi->rss_lut == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_lut");
			rte_free(vsi->rss_key);
			vsi->rss_key = NULL;
			return -ENOMEM;
		}
	}
	/* configure RSS key */
	if (!rss_conf->rss_key)
		ice_get_default_rss_key(vsi->rss_key, vsi->rss_key_size);
	else
		rte_memcpy(vsi->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   vsi->rss_key_size));

	rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
	if (ret)
		goto out;

	/* init RSS LUT table */
	for (i = 0; i < vsi->rss_lut_size; i++)
		vsi->rss_lut[i] = i % nb_q;

	lut_params.vsi_handle = vsi->idx;
	lut_params.lut_size = vsi->rss_lut_size;
	lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
	lut_params.lut = vsi->rss_lut;
	lut_params.global_lut_id = 0;
	ret = ice_aq_set_rss_lut(hw, &lut_params);
	if (ret)
		goto out;

	/* Enable registers for symmetric_toeplitz function. */
	reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
	reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
	      (1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
	ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);

	/* RSS hash configuration */
	ice_rss_hash_set(pf, rss_conf->rss_hf);

	return 0;
out:
	rte_free(vsi->rss_key);
	vsi->rss_key = NULL;
	rte_free(vsi->rss_lut);
	vsi->rss_lut = NULL;
	return -EINVAL;
}

static int
ice_dev_configure(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret;

	/* Initialize to TRUE. If any of the Rx queues doesn't meet the
	 * bulk allocation or vector Rx preconditions we will reset it.
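	 * The Rx/Tx queue setup paths may clear these flags again when a
	 * queue does not satisfy those preconditions.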
3251 */ 3252 ad->rx_bulk_alloc_allowed = true; 3253 ad->tx_simple_allowed = true; 3254 3255 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 3256 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; 3257 3258 if (dev->data->nb_rx_queues) { 3259 ret = ice_init_rss(pf); 3260 if (ret) { 3261 PMD_DRV_LOG(ERR, "Failed to enable rss for PF"); 3262 return ret; 3263 } 3264 } 3265 3266 return 0; 3267 } 3268 3269 static void 3270 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect, 3271 int base_queue, int nb_queue) 3272 { 3273 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 3274 uint32_t val, val_tx; 3275 int i; 3276 3277 for (i = 0; i < nb_queue; i++) { 3278 /*do actual bind*/ 3279 val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) | 3280 (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M; 3281 val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) | 3282 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M; 3283 3284 PMD_DRV_LOG(INFO, "queue %d is binding to vect %d", 3285 base_queue + i, msix_vect); 3286 /* set ITR0 value */ 3287 ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x2); 3288 ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val); 3289 ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx); 3290 } 3291 } 3292 3293 void 3294 ice_vsi_queues_bind_intr(struct ice_vsi *vsi) 3295 { 3296 struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id]; 3297 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); 3298 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 3299 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 3300 uint16_t msix_vect = vsi->msix_intr; 3301 uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd); 3302 uint16_t queue_idx = 0; 3303 int record = 0; 3304 int i; 3305 3306 /* clear Rx/Tx queue interrupt */ 3307 for (i = 0; i < vsi->nb_used_qps; i++) { 3308 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0); 3309 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0); 3310 } 3311 3312 /* PF bind interrupt */ 3313 if (rte_intr_dp_is_en(intr_handle)) { 3314 queue_idx = 0; 3315 record = 1; 3316 } 3317 3318 for (i = 0; i < vsi->nb_used_qps; i++) { 3319 if (nb_msix <= 1) { 3320 if (!rte_intr_allow_others(intr_handle)) 3321 msix_vect = ICE_MISC_VEC_ID; 3322 3323 /* uio mapping all queue to one msix_vect */ 3324 __vsi_queues_bind_intr(vsi, msix_vect, 3325 vsi->base_queue + i, 3326 vsi->nb_used_qps - i); 3327 3328 for (; !!record && i < vsi->nb_used_qps; i++) 3329 intr_handle->intr_vec[queue_idx + i] = 3330 msix_vect; 3331 break; 3332 } 3333 3334 /* vfio 1:1 queue/msix_vect mapping */ 3335 __vsi_queues_bind_intr(vsi, msix_vect, 3336 vsi->base_queue + i, 1); 3337 3338 if (!!record) 3339 intr_handle->intr_vec[queue_idx + i] = msix_vect; 3340 3341 msix_vect++; 3342 nb_msix--; 3343 } 3344 } 3345 3346 void 3347 ice_vsi_enable_queues_intr(struct ice_vsi *vsi) 3348 { 3349 struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id]; 3350 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); 3351 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 3352 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 3353 uint16_t msix_intr, i; 3354 3355 if (rte_intr_allow_others(intr_handle)) 3356 for (i = 0; i < vsi->nb_used_qps; i++) { 3357 msix_intr = vsi->msix_intr + i; 3358 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), 3359 GLINT_DYN_CTL_INTENA_M | 3360 GLINT_DYN_CTL_CLEARPBA_M | 3361 GLINT_DYN_CTL_ITR_INDX_M | 3362 GLINT_DYN_CTL_WB_ON_ITR_M); 3363 } 3364 else 3365 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), 3366 GLINT_DYN_CTL_INTENA_M | 3367 GLINT_DYN_CTL_CLEARPBA_M 
| 3368 GLINT_DYN_CTL_ITR_INDX_M | 3369 GLINT_DYN_CTL_WB_ON_ITR_M); 3370 } 3371 3372 static int 3373 ice_rxq_intr_setup(struct rte_eth_dev *dev) 3374 { 3375 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 3376 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); 3377 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 3378 struct ice_vsi *vsi = pf->main_vsi; 3379 uint32_t intr_vector = 0; 3380 3381 rte_intr_disable(intr_handle); 3382 3383 /* check and configure queue intr-vector mapping */ 3384 if ((rte_intr_cap_multiple(intr_handle) || 3385 !RTE_ETH_DEV_SRIOV(dev).active) && 3386 dev->data->dev_conf.intr_conf.rxq != 0) { 3387 intr_vector = dev->data->nb_rx_queues; 3388 if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) { 3389 PMD_DRV_LOG(ERR, "At most %d intr queues supported", 3390 ICE_MAX_INTR_QUEUE_NUM); 3391 return -ENOTSUP; 3392 } 3393 if (rte_intr_efd_enable(intr_handle, intr_vector)) 3394 return -1; 3395 } 3396 3397 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 3398 intr_handle->intr_vec = 3399 rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int), 3400 0); 3401 if (!intr_handle->intr_vec) { 3402 PMD_DRV_LOG(ERR, 3403 "Failed to allocate %d rx_queues intr_vec", 3404 dev->data->nb_rx_queues); 3405 return -ENOMEM; 3406 } 3407 } 3408 3409 /* Map queues with MSIX interrupt */ 3410 vsi->nb_used_qps = dev->data->nb_rx_queues; 3411 ice_vsi_queues_bind_intr(vsi); 3412 3413 /* Enable interrupts for all the queues */ 3414 ice_vsi_enable_queues_intr(vsi); 3415 3416 rte_intr_enable(intr_handle); 3417 3418 return 0; 3419 } 3420 3421 static void 3422 ice_get_init_link_status(struct rte_eth_dev *dev) 3423 { 3424 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3425 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 3426 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? 
true : false; 3427 struct ice_link_status link_status; 3428 int ret; 3429 3430 ret = ice_aq_get_link_info(hw->port_info, enable_lse, 3431 &link_status, NULL); 3432 if (ret != ICE_SUCCESS) { 3433 PMD_DRV_LOG(ERR, "Failed to get link info"); 3434 pf->init_link_up = false; 3435 return; 3436 } 3437 3438 if (link_status.link_info & ICE_AQ_LINK_UP) 3439 pf->init_link_up = true; 3440 } 3441 3442 static int 3443 ice_pps_out_cfg(struct ice_hw *hw, int idx, int timer) 3444 { 3445 uint64_t current_time, start_time; 3446 uint32_t hi, lo, lo2, func, val; 3447 3448 lo = ICE_READ_REG(hw, GLTSYN_TIME_L(timer)); 3449 hi = ICE_READ_REG(hw, GLTSYN_TIME_H(timer)); 3450 lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(timer)); 3451 3452 if (lo2 < lo) { 3453 lo = ICE_READ_REG(hw, GLTSYN_TIME_L(timer)); 3454 hi = ICE_READ_REG(hw, GLTSYN_TIME_H(timer)); 3455 } 3456 3457 current_time = ((uint64_t)hi << 32) | lo; 3458 3459 start_time = (current_time + NSEC_PER_SEC) / 3460 NSEC_PER_SEC * NSEC_PER_SEC; 3461 start_time = start_time - PPS_OUT_DELAY_NS; 3462 3463 func = 8 + idx + timer * 4; 3464 val = GLGEN_GPIO_CTL_PIN_DIR_M | 3465 ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & 3466 GLGEN_GPIO_CTL_PIN_FUNC_M); 3467 3468 /* Write clkout with half of period value */ 3469 ICE_WRITE_REG(hw, GLTSYN_CLKO(idx, timer), NSEC_PER_SEC / 2); 3470 3471 /* Write TARGET time register */ 3472 ICE_WRITE_REG(hw, GLTSYN_TGT_L(idx, timer), start_time & 0xffffffff); 3473 ICE_WRITE_REG(hw, GLTSYN_TGT_H(idx, timer), start_time >> 32); 3474 3475 /* Write AUX_OUT register */ 3476 ICE_WRITE_REG(hw, GLTSYN_AUX_OUT(idx, timer), 3477 GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M); 3478 3479 /* Write GPIO CTL register */ 3480 ICE_WRITE_REG(hw, GLGEN_GPIO_CTL(idx), val); 3481 3482 return 0; 3483 } 3484 3485 static int 3486 ice_dev_start(struct rte_eth_dev *dev) 3487 { 3488 struct rte_eth_dev_data *data = dev->data; 3489 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3490 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 3491 struct ice_vsi *vsi = pf->main_vsi; 3492 struct ice_adapter *ad = 3493 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 3494 uint16_t nb_rxq = 0; 3495 uint16_t nb_txq, i; 3496 uint16_t max_frame_size; 3497 int mask, ret; 3498 uint8_t timer = hw->func_caps.ts_func_info.tmr_index_owned; 3499 uint32_t pin_idx = ad->devargs.pin_idx; 3500 3501 /* program Tx queues' context in hardware */ 3502 for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) { 3503 ret = ice_tx_queue_start(dev, nb_txq); 3504 if (ret) { 3505 PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq); 3506 goto tx_err; 3507 } 3508 } 3509 3510 /* program Rx queues' context in hardware*/ 3511 for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) { 3512 ret = ice_rx_queue_start(dev, nb_rxq); 3513 if (ret) { 3514 PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq); 3515 goto rx_err; 3516 } 3517 } 3518 3519 ice_set_rx_function(dev); 3520 ice_set_tx_function(dev); 3521 3522 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | 3523 ETH_VLAN_EXTEND_MASK; 3524 ret = ice_vlan_offload_set(dev, mask); 3525 if (ret) { 3526 PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); 3527 goto rx_err; 3528 } 3529 3530 /* enable Rx interrput and mapping Rx queue to interrupt vector */ 3531 if (ice_rxq_intr_setup(dev)) 3532 return -EIO; 3533 3534 /* Enable receiving broadcast packets and transmitting packets */ 3535 ret = ice_set_vsi_promisc(hw, vsi->idx, 3536 ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX | 3537 ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX, 3538 0); 
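	/* Only broadcast Rx/Tx and unicast/multicast Tx are enabled here;
	 * unicast and multicast Rx promiscuous modes are expected to be
	 * handled by the promiscuous/allmulticast dev ops instead.
	 */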
3539 if (ret != ICE_SUCCESS) 3540 PMD_DRV_LOG(INFO, "fail to set vsi broadcast"); 3541 3542 ret = ice_aq_set_event_mask(hw, hw->port_info->lport, 3543 ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT | 3544 ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM | 3545 ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS | 3546 ICE_AQ_LINK_EVENT_SIGNAL_DETECT | 3547 ICE_AQ_LINK_EVENT_AN_COMPLETED | 3548 ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)), 3549 NULL); 3550 if (ret != ICE_SUCCESS) 3551 PMD_DRV_LOG(WARNING, "Fail to set phy mask"); 3552 3553 ice_get_init_link_status(dev); 3554 3555 ice_dev_set_link_up(dev); 3556 3557 /* Call get_link_info aq commond to enable/disable LSE */ 3558 ice_link_update(dev, 0); 3559 3560 pf->adapter_stopped = false; 3561 3562 /* Set the max frame size to default value*/ 3563 max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ? 3564 pf->dev_data->dev_conf.rxmode.max_rx_pkt_len : 3565 ICE_FRAME_SIZE_MAX; 3566 3567 /* Set the max frame size to HW*/ 3568 ice_aq_set_mac_cfg(hw, max_frame_size, NULL); 3569 3570 if (ad->devargs.pps_out_ena) { 3571 ret = ice_pps_out_cfg(hw, pin_idx, timer); 3572 if (ret) { 3573 PMD_DRV_LOG(ERR, "Fail to configure 1pps out"); 3574 goto rx_err; 3575 } 3576 } 3577 3578 return 0; 3579 3580 /* stop the started queues if failed to start all queues */ 3581 rx_err: 3582 for (i = 0; i < nb_rxq; i++) 3583 ice_rx_queue_stop(dev, i); 3584 tx_err: 3585 for (i = 0; i < nb_txq; i++) 3586 ice_tx_queue_stop(dev, i); 3587 3588 return -EIO; 3589 } 3590 3591 static int 3592 ice_dev_reset(struct rte_eth_dev *dev) 3593 { 3594 int ret; 3595 3596 if (dev->data->sriov.active) 3597 return -ENOTSUP; 3598 3599 ret = ice_dev_uninit(dev); 3600 if (ret) { 3601 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret); 3602 return -ENXIO; 3603 } 3604 3605 ret = ice_dev_init(dev); 3606 if (ret) { 3607 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret); 3608 return -ENXIO; 3609 } 3610 3611 return 0; 3612 } 3613 3614 static int 3615 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 3616 { 3617 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 3618 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3619 struct ice_vsi *vsi = pf->main_vsi; 3620 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device); 3621 bool is_safe_mode = pf->adapter->is_safe_mode; 3622 u64 phy_type_low; 3623 u64 phy_type_high; 3624 3625 dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN; 3626 dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX; 3627 dev_info->max_rx_queues = vsi->nb_qps; 3628 dev_info->max_tx_queues = vsi->nb_qps; 3629 dev_info->max_mac_addrs = vsi->max_macaddrs; 3630 dev_info->max_vfs = pci_dev->max_vfs; 3631 dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD; 3632 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 3633 3634 dev_info->rx_offload_capa = 3635 DEV_RX_OFFLOAD_VLAN_STRIP | 3636 DEV_RX_OFFLOAD_JUMBO_FRAME | 3637 DEV_RX_OFFLOAD_KEEP_CRC | 3638 DEV_RX_OFFLOAD_SCATTER | 3639 DEV_RX_OFFLOAD_VLAN_FILTER; 3640 dev_info->tx_offload_capa = 3641 DEV_TX_OFFLOAD_VLAN_INSERT | 3642 DEV_TX_OFFLOAD_TCP_TSO | 3643 DEV_TX_OFFLOAD_MULTI_SEGS | 3644 DEV_TX_OFFLOAD_MBUF_FAST_FREE; 3645 dev_info->flow_type_rss_offloads = 0; 3646 3647 if (!is_safe_mode) { 3648 dev_info->rx_offload_capa |= 3649 DEV_RX_OFFLOAD_IPV4_CKSUM | 3650 DEV_RX_OFFLOAD_UDP_CKSUM | 3651 DEV_RX_OFFLOAD_TCP_CKSUM | 3652 DEV_RX_OFFLOAD_QINQ_STRIP | 3653 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 3654 DEV_RX_OFFLOAD_VLAN_EXTEND | 3655 DEV_RX_OFFLOAD_RSS_HASH; 3656 dev_info->tx_offload_capa |= 3657 
DEV_TX_OFFLOAD_QINQ_INSERT | 3658 DEV_TX_OFFLOAD_IPV4_CKSUM | 3659 DEV_TX_OFFLOAD_UDP_CKSUM | 3660 DEV_TX_OFFLOAD_TCP_CKSUM | 3661 DEV_TX_OFFLOAD_SCTP_CKSUM | 3662 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 3663 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM; 3664 dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL; 3665 } 3666 3667 dev_info->rx_queue_offload_capa = 0; 3668 dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE; 3669 3670 dev_info->reta_size = pf->hash_lut_size; 3671 dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); 3672 3673 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3674 .rx_thresh = { 3675 .pthresh = ICE_DEFAULT_RX_PTHRESH, 3676 .hthresh = ICE_DEFAULT_RX_HTHRESH, 3677 .wthresh = ICE_DEFAULT_RX_WTHRESH, 3678 }, 3679 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH, 3680 .rx_drop_en = 0, 3681 .offloads = 0, 3682 }; 3683 3684 dev_info->default_txconf = (struct rte_eth_txconf) { 3685 .tx_thresh = { 3686 .pthresh = ICE_DEFAULT_TX_PTHRESH, 3687 .hthresh = ICE_DEFAULT_TX_HTHRESH, 3688 .wthresh = ICE_DEFAULT_TX_WTHRESH, 3689 }, 3690 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH, 3691 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH, 3692 .offloads = 0, 3693 }; 3694 3695 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { 3696 .nb_max = ICE_MAX_RING_DESC, 3697 .nb_min = ICE_MIN_RING_DESC, 3698 .nb_align = ICE_ALIGN_RING_DESC, 3699 }; 3700 3701 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { 3702 .nb_max = ICE_MAX_RING_DESC, 3703 .nb_min = ICE_MIN_RING_DESC, 3704 .nb_align = ICE_ALIGN_RING_DESC, 3705 }; 3706 3707 dev_info->speed_capa = ETH_LINK_SPEED_10M | 3708 ETH_LINK_SPEED_100M | 3709 ETH_LINK_SPEED_1G | 3710 ETH_LINK_SPEED_2_5G | 3711 ETH_LINK_SPEED_5G | 3712 ETH_LINK_SPEED_10G | 3713 ETH_LINK_SPEED_20G | 3714 ETH_LINK_SPEED_25G; 3715 3716 phy_type_low = hw->port_info->phy.phy_type_low; 3717 phy_type_high = hw->port_info->phy.phy_type_high; 3718 3719 if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low)) 3720 dev_info->speed_capa |= ETH_LINK_SPEED_50G; 3721 3722 if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) || 3723 ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high)) 3724 dev_info->speed_capa |= ETH_LINK_SPEED_100G; 3725 3726 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3727 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3728 3729 dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST; 3730 dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST; 3731 dev_info->default_rxportconf.nb_queues = 1; 3732 dev_info->default_txportconf.nb_queues = 1; 3733 dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN; 3734 dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN; 3735 3736 return 0; 3737 } 3738 3739 static inline int 3740 ice_atomic_read_link_status(struct rte_eth_dev *dev, 3741 struct rte_eth_link *link) 3742 { 3743 struct rte_eth_link *dst = link; 3744 struct rte_eth_link *src = &dev->data->dev_link; 3745 3746 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 3747 *(uint64_t *)src) == 0) 3748 return -1; 3749 3750 return 0; 3751 } 3752 3753 static inline int 3754 ice_atomic_write_link_status(struct rte_eth_dev *dev, 3755 struct rte_eth_link *link) 3756 { 3757 struct rte_eth_link *dst = &dev->data->dev_link; 3758 struct rte_eth_link *src = link; 3759 3760 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 3761 *(uint64_t *)src) == 0) 3762 return -1; 3763 3764 return 0; 3765 } 3766 3767 static int 3768 ice_link_update(struct rte_eth_dev *dev, int wait_to_complete) 3769 { 3770 #define CHECK_INTERVAL 100 /* 100ms */ 3771 #define 
MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ 3772 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3773 struct ice_link_status link_status; 3774 struct rte_eth_link link, old; 3775 int status; 3776 unsigned int rep_cnt = MAX_REPEAT_TIME; 3777 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false; 3778 3779 memset(&link, 0, sizeof(link)); 3780 memset(&old, 0, sizeof(old)); 3781 memset(&link_status, 0, sizeof(link_status)); 3782 ice_atomic_read_link_status(dev, &old); 3783 3784 do { 3785 /* Get link status information from hardware */ 3786 status = ice_aq_get_link_info(hw->port_info, enable_lse, 3787 &link_status, NULL); 3788 if (status != ICE_SUCCESS) { 3789 link.link_speed = ETH_SPEED_NUM_100M; 3790 link.link_duplex = ETH_LINK_FULL_DUPLEX; 3791 PMD_DRV_LOG(ERR, "Failed to get link info"); 3792 goto out; 3793 } 3794 3795 link.link_status = link_status.link_info & ICE_AQ_LINK_UP; 3796 if (!wait_to_complete || link.link_status) 3797 break; 3798 3799 rte_delay_ms(CHECK_INTERVAL); 3800 } while (--rep_cnt); 3801 3802 if (!link.link_status) 3803 goto out; 3804 3805 /* Full-duplex operation at all supported speeds */ 3806 link.link_duplex = ETH_LINK_FULL_DUPLEX; 3807 3808 /* Parse the link status */ 3809 switch (link_status.link_speed) { 3810 case ICE_AQ_LINK_SPEED_10MB: 3811 link.link_speed = ETH_SPEED_NUM_10M; 3812 break; 3813 case ICE_AQ_LINK_SPEED_100MB: 3814 link.link_speed = ETH_SPEED_NUM_100M; 3815 break; 3816 case ICE_AQ_LINK_SPEED_1000MB: 3817 link.link_speed = ETH_SPEED_NUM_1G; 3818 break; 3819 case ICE_AQ_LINK_SPEED_2500MB: 3820 link.link_speed = ETH_SPEED_NUM_2_5G; 3821 break; 3822 case ICE_AQ_LINK_SPEED_5GB: 3823 link.link_speed = ETH_SPEED_NUM_5G; 3824 break; 3825 case ICE_AQ_LINK_SPEED_10GB: 3826 link.link_speed = ETH_SPEED_NUM_10G; 3827 break; 3828 case ICE_AQ_LINK_SPEED_20GB: 3829 link.link_speed = ETH_SPEED_NUM_20G; 3830 break; 3831 case ICE_AQ_LINK_SPEED_25GB: 3832 link.link_speed = ETH_SPEED_NUM_25G; 3833 break; 3834 case ICE_AQ_LINK_SPEED_40GB: 3835 link.link_speed = ETH_SPEED_NUM_40G; 3836 break; 3837 case ICE_AQ_LINK_SPEED_50GB: 3838 link.link_speed = ETH_SPEED_NUM_50G; 3839 break; 3840 case ICE_AQ_LINK_SPEED_100GB: 3841 link.link_speed = ETH_SPEED_NUM_100G; 3842 break; 3843 case ICE_AQ_LINK_SPEED_UNKNOWN: 3844 PMD_DRV_LOG(ERR, "Unknown link speed"); 3845 link.link_speed = ETH_SPEED_NUM_UNKNOWN; 3846 break; 3847 default: 3848 PMD_DRV_LOG(ERR, "None link speed"); 3849 link.link_speed = ETH_SPEED_NUM_NONE; 3850 break; 3851 } 3852 3853 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 3854 ETH_LINK_SPEED_FIXED); 3855 3856 out: 3857 ice_atomic_write_link_status(dev, &link); 3858 if (link.link_status == old.link_status) 3859 return -1; 3860 3861 return 0; 3862 } 3863 3864 /* Force the physical link state by getting the current PHY capabilities from 3865 * hardware and setting the PHY config based on the determined capabilities. If 3866 * link changes, link event will be triggered because both the Enable Automatic 3867 * Link Update and LESM Enable bits are set when setting the PHY capabilities. 
3868 */ 3869 static enum ice_status 3870 ice_force_phys_link_state(struct ice_hw *hw, bool link_up) 3871 { 3872 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 3873 struct ice_aqc_get_phy_caps_data *pcaps; 3874 struct ice_port_info *pi; 3875 enum ice_status status; 3876 3877 if (!hw || !hw->port_info) 3878 return ICE_ERR_PARAM; 3879 3880 pi = hw->port_info; 3881 3882 pcaps = (struct ice_aqc_get_phy_caps_data *) 3883 ice_malloc(hw, sizeof(*pcaps)); 3884 if (!pcaps) 3885 return ICE_ERR_NO_MEMORY; 3886 3887 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, 3888 pcaps, NULL); 3889 if (status) 3890 goto out; 3891 3892 /* No change in link */ 3893 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && 3894 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) 3895 goto out; 3896 3897 cfg.phy_type_low = pcaps->phy_type_low; 3898 cfg.phy_type_high = pcaps->phy_type_high; 3899 cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3900 cfg.low_power_ctrl_an = pcaps->low_power_ctrl_an; 3901 cfg.eee_cap = pcaps->eee_cap; 3902 cfg.eeer_value = pcaps->eeer_value; 3903 cfg.link_fec_opt = pcaps->link_fec_options; 3904 if (link_up) 3905 cfg.caps |= ICE_AQ_PHY_ENA_LINK; 3906 else 3907 cfg.caps &= ~ICE_AQ_PHY_ENA_LINK; 3908 3909 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 3910 3911 out: 3912 ice_free(hw, pcaps); 3913 return status; 3914 } 3915 3916 static int 3917 ice_dev_set_link_up(struct rte_eth_dev *dev) 3918 { 3919 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3920 3921 return ice_force_phys_link_state(hw, true); 3922 } 3923 3924 static int 3925 ice_dev_set_link_down(struct rte_eth_dev *dev) 3926 { 3927 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3928 3929 return ice_force_phys_link_state(hw, false); 3930 } 3931 3932 static int 3933 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 3934 { 3935 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 3936 struct rte_eth_dev_data *dev_data = pf->dev_data; 3937 uint32_t frame_size = mtu + ICE_ETH_OVERHEAD; 3938 3939 /* check if mtu is within the allowed range */ 3940 if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX) 3941 return -EINVAL; 3942 3943 /* mtu setting is forbidden if port is start */ 3944 if (dev_data->dev_started) { 3945 PMD_DRV_LOG(ERR, 3946 "port %d must be stopped before configuration", 3947 dev_data->port_id); 3948 return -EBUSY; 3949 } 3950 3951 if (frame_size > ICE_ETH_MAX_LEN) 3952 dev_data->dev_conf.rxmode.offloads |= 3953 DEV_RX_OFFLOAD_JUMBO_FRAME; 3954 else 3955 dev_data->dev_conf.rxmode.offloads &= 3956 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 3957 3958 dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 3959 3960 return 0; 3961 } 3962 3963 static int ice_macaddr_set(struct rte_eth_dev *dev, 3964 struct rte_ether_addr *mac_addr) 3965 { 3966 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3967 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 3968 struct ice_vsi *vsi = pf->main_vsi; 3969 struct ice_mac_filter *f; 3970 uint8_t flags = 0; 3971 int ret; 3972 3973 if (!rte_is_valid_assigned_ether_addr(mac_addr)) { 3974 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address."); 3975 return -EINVAL; 3976 } 3977 3978 TAILQ_FOREACH(f, &vsi->mac_list, next) { 3979 if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr)) 3980 break; 3981 } 3982 3983 if (!f) { 3984 PMD_DRV_LOG(ERR, "Failed to find filter for default mac"); 3985 return -EIO; 3986 } 3987 3988 ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr); 3989 
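	/* Replacing the default MAC is a three-step sequence: the old filter
	 * was removed above, the new filter is added below, and the new
	 * address is then reported to firmware via ice_aq_manage_mac_write().
	 */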
if (ret != ICE_SUCCESS) { 3990 PMD_DRV_LOG(ERR, "Failed to delete mac filter"); 3991 return -EIO; 3992 } 3993 ret = ice_add_mac_filter(vsi, mac_addr); 3994 if (ret != ICE_SUCCESS) { 3995 PMD_DRV_LOG(ERR, "Failed to add mac filter"); 3996 return -EIO; 3997 } 3998 rte_ether_addr_copy(mac_addr, &pf->dev_addr); 3999 4000 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; 4001 ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL); 4002 if (ret != ICE_SUCCESS) 4003 PMD_DRV_LOG(ERR, "Failed to set manage mac"); 4004 4005 return 0; 4006 } 4007 4008 /* Add a MAC address, and update filters */ 4009 static int 4010 ice_macaddr_add(struct rte_eth_dev *dev, 4011 struct rte_ether_addr *mac_addr, 4012 __rte_unused uint32_t index, 4013 __rte_unused uint32_t pool) 4014 { 4015 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4016 struct ice_vsi *vsi = pf->main_vsi; 4017 int ret; 4018 4019 ret = ice_add_mac_filter(vsi, mac_addr); 4020 if (ret != ICE_SUCCESS) { 4021 PMD_DRV_LOG(ERR, "Failed to add MAC filter"); 4022 return -EINVAL; 4023 } 4024 4025 return ICE_SUCCESS; 4026 } 4027 4028 /* Remove a MAC address, and update filters */ 4029 static void 4030 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index) 4031 { 4032 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4033 struct ice_vsi *vsi = pf->main_vsi; 4034 struct rte_eth_dev_data *data = dev->data; 4035 struct rte_ether_addr *macaddr; 4036 int ret; 4037 4038 macaddr = &data->mac_addrs[index]; 4039 ret = ice_remove_mac_filter(vsi, macaddr); 4040 if (ret) { 4041 PMD_DRV_LOG(ERR, "Failed to remove MAC filter"); 4042 return; 4043 } 4044 } 4045 4046 static int 4047 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 4048 { 4049 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4050 struct ice_vlan vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, vlan_id); 4051 struct ice_vsi *vsi = pf->main_vsi; 4052 int ret; 4053 4054 PMD_INIT_FUNC_TRACE(); 4055 4056 /** 4057 * Vlan 0 is the generic filter for untagged packets 4058 * and can't be removed or added by user. 4059 */ 4060 if (vlan_id == 0) 4061 return 0; 4062 4063 if (on) { 4064 ret = ice_add_vlan_filter(vsi, &vlan); 4065 if (ret < 0) { 4066 PMD_DRV_LOG(ERR, "Failed to add vlan filter"); 4067 return -EINVAL; 4068 } 4069 } else { 4070 ret = ice_remove_vlan_filter(vsi, &vlan); 4071 if (ret < 0) { 4072 PMD_DRV_LOG(ERR, "Failed to remove vlan filter"); 4073 return -EINVAL; 4074 } 4075 } 4076 4077 return 0; 4078 } 4079 4080 /* In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are 4081 * based on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x888a8) 4082 * doesn't matter. In Double VLAN Mode (DVM), outer/single VLAN filters via 4083 * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID. 4084 * 4085 * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic 4086 * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged 4087 * traffic in SVM, since the VLAN TPID isn't part of filtering. 4088 * 4089 * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be 4090 * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is 4091 * part of filtering. 
4092 */ 4093 static int 4094 ice_vsi_add_vlan_zero(struct ice_vsi *vsi) 4095 { 4096 struct ice_vlan vlan; 4097 int err; 4098 4099 vlan = ICE_VLAN(0, 0); 4100 err = ice_add_vlan_filter(vsi, &vlan); 4101 if (err) { 4102 PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0"); 4103 return err; 4104 } 4105 4106 /* in SVM both VLAN 0 filters are identical */ 4107 if (!ice_is_dvm_ena(&vsi->adapter->hw)) 4108 return 0; 4109 4110 vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0); 4111 err = ice_add_vlan_filter(vsi, &vlan); 4112 if (err) { 4113 PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0 in double VLAN mode"); 4114 return err; 4115 } 4116 4117 return 0; 4118 } 4119 4120 /* 4121 * Delete the VLAN 0 filters in the same manner that they were added in 4122 * ice_vsi_add_vlan_zero. 4123 */ 4124 static int 4125 ice_vsi_del_vlan_zero(struct ice_vsi *vsi) 4126 { 4127 struct ice_vlan vlan; 4128 int err; 4129 4130 vlan = ICE_VLAN(0, 0); 4131 err = ice_remove_vlan_filter(vsi, &vlan); 4132 if (err) { 4133 PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0"); 4134 return err; 4135 } 4136 4137 /* in SVM both VLAN 0 filters are identical */ 4138 if (!ice_is_dvm_ena(&vsi->adapter->hw)) 4139 return 0; 4140 4141 vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0); 4142 err = ice_remove_vlan_filter(vsi, &vlan); 4143 if (err) { 4144 PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0 in double VLAN mode"); 4145 return err; 4146 } 4147 4148 return 0; 4149 } 4150 4151 /* Configure vlan filter on or off */ 4152 static int 4153 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on) 4154 { 4155 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4156 struct ice_vsi_ctx ctxt; 4157 uint8_t sw_flags2; 4158 int ret = 0; 4159 4160 sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 4161 4162 if (on) 4163 vsi->info.sw_flags2 |= sw_flags2; 4164 else 4165 vsi->info.sw_flags2 &= ~sw_flags2; 4166 4167 vsi->info.sw_id = hw->port_info->sw_id; 4168 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 4169 ctxt.info.valid_sections = 4170 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID | 4171 ICE_AQ_VSI_PROP_SECURITY_VALID); 4172 ctxt.vsi_num = vsi->vsi_id; 4173 4174 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); 4175 if (ret) { 4176 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning", 4177 on ? "enable" : "disable"); 4178 return -EINVAL; 4179 } else { 4180 vsi->info.valid_sections |= 4181 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID | 4182 ICE_AQ_VSI_PROP_SECURITY_VALID); 4183 } 4184 4185 /* consist with other drivers, allow untagged packet when vlan filter on */ 4186 if (on) 4187 ret = ice_vsi_add_vlan_zero(vsi); 4188 else 4189 ret = ice_vsi_del_vlan_zero(vsi); 4190 4191 return 0; 4192 } 4193 4194 /* Manage VLAN stripping for the VSI for Rx */ 4195 static int 4196 ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) 4197 { 4198 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4199 struct ice_vsi_ctx ctxt; 4200 enum ice_status status; 4201 int err = 0; 4202 4203 /* do not allow modifying VLAN stripping when a port VLAN is configured 4204 * on this VSI 4205 */ 4206 if (vsi->info.port_based_inner_vlan) 4207 return 0; 4208 4209 memset(&ctxt, 0, sizeof(ctxt)); 4210 4211 if (ena) 4212 /* Strip VLAN tag from Rx packet and put it in the desc */ 4213 ctxt.info.inner_vlan_flags = 4214 ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH; 4215 else 4216 /* Disable stripping. 
Leave tag in packet */ 4217 ctxt.info.inner_vlan_flags = 4218 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING; 4219 4220 /* Allow all packets untagged/tagged */ 4221 ctxt.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL; 4222 4223 ctxt.info.valid_sections = rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID); 4224 4225 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); 4226 if (status) { 4227 PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan stripping", 4228 ena ? "enable" : "disable"); 4229 err = -EIO; 4230 } else { 4231 vsi->info.inner_vlan_flags = ctxt.info.inner_vlan_flags; 4232 } 4233 4234 return err; 4235 } 4236 4237 static int 4238 ice_vsi_ena_inner_stripping(struct ice_vsi *vsi) 4239 { 4240 return ice_vsi_manage_vlan_stripping(vsi, true); 4241 } 4242 4243 static int 4244 ice_vsi_dis_inner_stripping(struct ice_vsi *vsi) 4245 { 4246 return ice_vsi_manage_vlan_stripping(vsi, false); 4247 } 4248 4249 static int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi) 4250 { 4251 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4252 struct ice_vsi_ctx ctxt; 4253 enum ice_status status; 4254 int err = 0; 4255 4256 /* do not allow modifying VLAN stripping when a port VLAN is configured 4257 * on this VSI 4258 */ 4259 if (vsi->info.port_based_outer_vlan) 4260 return 0; 4261 4262 memset(&ctxt, 0, sizeof(ctxt)); 4263 4264 ctxt.info.valid_sections = 4265 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID); 4266 /* clear current outer VLAN strip settings */ 4267 ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags & 4268 ~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M); 4269 ctxt.info.outer_vlan_flags |= 4270 (ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH << 4271 ICE_AQ_VSI_OUTER_VLAN_EMODE_S) | 4272 (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 << 4273 ICE_AQ_VSI_OUTER_TAG_TYPE_S); 4274 4275 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); 4276 if (status) { 4277 PMD_DRV_LOG(ERR, "Update VSI failed to enable outer VLAN stripping"); 4278 err = -EIO; 4279 } else { 4280 vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags; 4281 } 4282 4283 return err; 4284 } 4285 4286 static int 4287 ice_vsi_dis_outer_stripping(struct ice_vsi *vsi) 4288 { 4289 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4290 struct ice_vsi_ctx ctxt; 4291 enum ice_status status; 4292 int err = 0; 4293 4294 if (vsi->info.port_based_outer_vlan) 4295 return 0; 4296 4297 memset(&ctxt, 0, sizeof(ctxt)); 4298 4299 ctxt.info.valid_sections = 4300 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID); 4301 /* clear current outer VLAN strip settings */ 4302 ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags & 4303 ~ICE_AQ_VSI_OUTER_VLAN_EMODE_M; 4304 ctxt.info.outer_vlan_flags |= ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING << 4305 ICE_AQ_VSI_OUTER_VLAN_EMODE_S; 4306 4307 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); 4308 if (status) { 4309 PMD_DRV_LOG(ERR, "Update VSI failed to disable outer VLAN stripping"); 4310 err = -EIO; 4311 } else { 4312 vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags; 4313 } 4314 4315 return err; 4316 } 4317 4318 static int 4319 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool ena) 4320 { 4321 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4322 int ret; 4323 4324 if (ice_is_dvm_ena(hw)) { 4325 if (ena) 4326 ret = ice_vsi_ena_outer_stripping(vsi); 4327 else 4328 ret = ice_vsi_dis_outer_stripping(vsi); 4329 } else { 4330 if (ena) 4331 ret = ice_vsi_ena_inner_stripping(vsi); 4332 else 4333 ret = ice_vsi_dis_inner_stripping(vsi); 4334 } 4335 4336 return ret; 4337 } 4338 4339 static int 4340 ice_vlan_offload_set(struct rte_eth_dev *dev, int 
mask) 4341 { 4342 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4343 struct ice_vsi *vsi = pf->main_vsi; 4344 struct rte_eth_rxmode *rxmode; 4345 4346 rxmode = &dev->data->dev_conf.rxmode; 4347 if (mask & ETH_VLAN_FILTER_MASK) { 4348 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 4349 ice_vsi_config_vlan_filter(vsi, true); 4350 else 4351 ice_vsi_config_vlan_filter(vsi, false); 4352 } 4353 4354 if (mask & ETH_VLAN_STRIP_MASK) { 4355 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 4356 ice_vsi_config_vlan_stripping(vsi, true); 4357 else 4358 ice_vsi_config_vlan_stripping(vsi, false); 4359 } 4360 4361 return 0; 4362 } 4363 4364 static int 4365 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size) 4366 { 4367 struct ice_aq_get_set_rss_lut_params lut_params; 4368 struct ice_pf *pf = ICE_VSI_TO_PF(vsi); 4369 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4370 int ret; 4371 4372 if (!lut) 4373 return -EINVAL; 4374 4375 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) { 4376 lut_params.vsi_handle = vsi->idx; 4377 lut_params.lut_size = lut_size; 4378 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; 4379 lut_params.lut = lut; 4380 lut_params.global_lut_id = 0; 4381 ret = ice_aq_get_rss_lut(hw, &lut_params); 4382 if (ret) { 4383 PMD_DRV_LOG(ERR, "Failed to get RSS lookup table"); 4384 return -EINVAL; 4385 } 4386 } else { 4387 uint64_t *lut_dw = (uint64_t *)lut; 4388 uint16_t i, lut_size_dw = lut_size / 4; 4389 4390 for (i = 0; i < lut_size_dw; i++) 4391 lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i)); 4392 } 4393 4394 return 0; 4395 } 4396 4397 static int 4398 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size) 4399 { 4400 struct ice_aq_get_set_rss_lut_params lut_params; 4401 struct ice_pf *pf; 4402 struct ice_hw *hw; 4403 int ret; 4404 4405 if (!vsi || !lut) 4406 return -EINVAL; 4407 4408 pf = ICE_VSI_TO_PF(vsi); 4409 hw = ICE_VSI_TO_HW(vsi); 4410 4411 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) { 4412 lut_params.vsi_handle = vsi->idx; 4413 lut_params.lut_size = lut_size; 4414 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; 4415 lut_params.lut = lut; 4416 lut_params.global_lut_id = 0; 4417 ret = ice_aq_set_rss_lut(hw, &lut_params); 4418 if (ret) { 4419 PMD_DRV_LOG(ERR, "Failed to set RSS lookup table"); 4420 return -EINVAL; 4421 } 4422 } else { 4423 uint64_t *lut_dw = (uint64_t *)lut; 4424 uint16_t i, lut_size_dw = lut_size / 4; 4425 4426 for (i = 0; i < lut_size_dw; i++) 4427 ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]); 4428 4429 ice_flush(hw); 4430 } 4431 4432 return 0; 4433 } 4434 4435 static int 4436 ice_rss_reta_update(struct rte_eth_dev *dev, 4437 struct rte_eth_rss_reta_entry64 *reta_conf, 4438 uint16_t reta_size) 4439 { 4440 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4441 uint16_t i, lut_size = pf->hash_lut_size; 4442 uint16_t idx, shift; 4443 uint8_t *lut; 4444 int ret; 4445 4446 if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 && 4447 reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 && 4448 reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) { 4449 PMD_DRV_LOG(ERR, 4450 "The size of hash lookup table configured (%d)" 4451 "doesn't match the number hardware can " 4452 "supported (128, 512, 2048)", 4453 reta_size); 4454 return -EINVAL; 4455 } 4456 4457 /* It MUST use the current LUT size to get the RSS lookup table, 4458 * otherwise if will fail with -100 error code. 
4459 */ 4460 lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0); 4461 if (!lut) { 4462 PMD_DRV_LOG(ERR, "No memory can be allocated"); 4463 return -ENOMEM; 4464 } 4465 ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size); 4466 if (ret) 4467 goto out; 4468 4469 for (i = 0; i < reta_size; i++) { 4470 idx = i / RTE_RETA_GROUP_SIZE; 4471 shift = i % RTE_RETA_GROUP_SIZE; 4472 if (reta_conf[idx].mask & (1ULL << shift)) 4473 lut[i] = reta_conf[idx].reta[shift]; 4474 } 4475 ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size); 4476 if (ret == 0 && lut_size != reta_size) { 4477 PMD_DRV_LOG(INFO, 4478 "The size of hash lookup table is changed from (%d) to (%d)", 4479 lut_size, reta_size); 4480 pf->hash_lut_size = reta_size; 4481 } 4482 4483 out: 4484 rte_free(lut); 4485 4486 return ret; 4487 } 4488 4489 static int 4490 ice_rss_reta_query(struct rte_eth_dev *dev, 4491 struct rte_eth_rss_reta_entry64 *reta_conf, 4492 uint16_t reta_size) 4493 { 4494 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4495 uint16_t i, lut_size = pf->hash_lut_size; 4496 uint16_t idx, shift; 4497 uint8_t *lut; 4498 int ret; 4499 4500 if (reta_size != lut_size) { 4501 PMD_DRV_LOG(ERR, 4502 "The size of hash lookup table configured (%d)" 4503 "doesn't match the number hardware can " 4504 "supported (%d)", 4505 reta_size, lut_size); 4506 return -EINVAL; 4507 } 4508 4509 lut = rte_zmalloc(NULL, reta_size, 0); 4510 if (!lut) { 4511 PMD_DRV_LOG(ERR, "No memory can be allocated"); 4512 return -ENOMEM; 4513 } 4514 4515 ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size); 4516 if (ret) 4517 goto out; 4518 4519 for (i = 0; i < reta_size; i++) { 4520 idx = i / RTE_RETA_GROUP_SIZE; 4521 shift = i % RTE_RETA_GROUP_SIZE; 4522 if (reta_conf[idx].mask & (1ULL << shift)) 4523 reta_conf[idx].reta[shift] = lut[i]; 4524 } 4525 4526 out: 4527 rte_free(lut); 4528 4529 return ret; 4530 } 4531 4532 static int 4533 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len) 4534 { 4535 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4536 int ret = 0; 4537 4538 if (!key || key_len == 0) { 4539 PMD_DRV_LOG(DEBUG, "No key to be configured"); 4540 return 0; 4541 } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) * 4542 sizeof(uint32_t)) { 4543 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len); 4544 return -EINVAL; 4545 } 4546 4547 struct ice_aqc_get_set_rss_keys *key_dw = 4548 (struct ice_aqc_get_set_rss_keys *)key; 4549 4550 ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw); 4551 if (ret) { 4552 PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ"); 4553 ret = -EINVAL; 4554 } 4555 4556 return ret; 4557 } 4558 4559 static int 4560 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len) 4561 { 4562 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4563 int ret; 4564 4565 if (!key || !key_len) 4566 return -EINVAL; 4567 4568 ret = ice_aq_get_rss_key 4569 (hw, vsi->idx, 4570 (struct ice_aqc_get_set_rss_keys *)key); 4571 if (ret) { 4572 PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ"); 4573 return -EINVAL; 4574 } 4575 *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); 4576 4577 return 0; 4578 } 4579 4580 static int 4581 ice_rss_hash_update(struct rte_eth_dev *dev, 4582 struct rte_eth_rss_conf *rss_conf) 4583 { 4584 enum ice_status status = ICE_SUCCESS; 4585 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4586 struct ice_vsi *vsi = pf->main_vsi; 4587 4588 /* set hash key */ 4589 status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len); 4590 if (status) 4591 return status; 4592 4593 if 
(rss_conf->rss_hf == 0) { 4594 pf->rss_hf = 0; 4595 return 0; 4596 } 4597 4598 /* RSS hash configuration */ 4599 ice_rss_hash_set(pf, rss_conf->rss_hf); 4600 4601 return 0; 4602 } 4603 4604 static int 4605 ice_rss_hash_conf_get(struct rte_eth_dev *dev, 4606 struct rte_eth_rss_conf *rss_conf) 4607 { 4608 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4609 struct ice_vsi *vsi = pf->main_vsi; 4610 4611 ice_get_rss_key(vsi, rss_conf->rss_key, 4612 &rss_conf->rss_key_len); 4613 4614 rss_conf->rss_hf = pf->rss_hf; 4615 return 0; 4616 } 4617 4618 static int 4619 ice_promisc_enable(struct rte_eth_dev *dev) 4620 { 4621 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4622 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4623 struct ice_vsi *vsi = pf->main_vsi; 4624 enum ice_status status; 4625 uint8_t pmask; 4626 int ret = 0; 4627 4628 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX | 4629 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX; 4630 4631 status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0); 4632 switch (status) { 4633 case ICE_ERR_ALREADY_EXISTS: 4634 PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled"); 4635 case ICE_SUCCESS: 4636 break; 4637 default: 4638 PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status); 4639 ret = -EAGAIN; 4640 } 4641 4642 return ret; 4643 } 4644 4645 static int 4646 ice_promisc_disable(struct rte_eth_dev *dev) 4647 { 4648 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4649 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4650 struct ice_vsi *vsi = pf->main_vsi; 4651 enum ice_status status; 4652 uint8_t pmask; 4653 int ret = 0; 4654 4655 if (dev->data->all_multicast == 1) 4656 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX; 4657 else 4658 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX | 4659 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX; 4660 4661 status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0); 4662 if (status != ICE_SUCCESS) { 4663 PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status); 4664 ret = -EAGAIN; 4665 } 4666 4667 return ret; 4668 } 4669 4670 static int 4671 ice_allmulti_enable(struct rte_eth_dev *dev) 4672 { 4673 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4674 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4675 struct ice_vsi *vsi = pf->main_vsi; 4676 enum ice_status status; 4677 uint8_t pmask; 4678 int ret = 0; 4679 4680 pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX; 4681 4682 status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0); 4683 4684 switch (status) { 4685 case ICE_ERR_ALREADY_EXISTS: 4686 PMD_DRV_LOG(DEBUG, "Allmulti has already been enabled"); 4687 case ICE_SUCCESS: 4688 break; 4689 default: 4690 PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status); 4691 ret = -EAGAIN; 4692 } 4693 4694 return ret; 4695 } 4696 4697 static int 4698 ice_allmulti_disable(struct rte_eth_dev *dev) 4699 { 4700 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4701 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4702 struct ice_vsi *vsi = pf->main_vsi; 4703 enum ice_status status; 4704 uint8_t pmask; 4705 int ret = 0; 4706 4707 if (dev->data->promiscuous == 1) 4708 return 0; /* must remain in all_multicast mode */ 4709 4710 pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX; 4711 4712 status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0); 4713 if (status != ICE_SUCCESS) { 4714 PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status); 4715 ret 
= -EAGAIN; 4716 } 4717 4718 return ret; 4719 } 4720 4721 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev, 4722 uint16_t queue_id) 4723 { 4724 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); 4725 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 4726 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4727 uint32_t val; 4728 uint16_t msix_intr; 4729 4730 msix_intr = intr_handle->intr_vec[queue_id]; 4731 4732 val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | 4733 GLINT_DYN_CTL_ITR_INDX_M; 4734 val &= ~GLINT_DYN_CTL_WB_ON_ITR_M; 4735 4736 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val); 4737 rte_intr_ack(&pci_dev->intr_handle); 4738 4739 return 0; 4740 } 4741 4742 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev, 4743 uint16_t queue_id) 4744 { 4745 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); 4746 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 4747 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4748 uint16_t msix_intr; 4749 4750 msix_intr = intr_handle->intr_vec[queue_id]; 4751 4752 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M); 4753 4754 return 0; 4755 } 4756 4757 static int 4758 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 4759 { 4760 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4761 u8 ver, patch; 4762 u16 build; 4763 int ret; 4764 4765 ver = hw->flash.orom.major; 4766 patch = hw->flash.orom.patch; 4767 build = hw->flash.orom.build; 4768 4769 ret = snprintf(fw_version, fw_size, 4770 "%x.%02x 0x%08x %d.%d.%d", 4771 hw->flash.nvm.major, 4772 hw->flash.nvm.minor, 4773 hw->flash.nvm.eetrack, 4774 ver, build, patch); 4775 if (ret < 0) 4776 return -EINVAL; 4777 4778 /* add the size of '\0' */ 4779 ret += 1; 4780 if (fw_size < (size_t)ret) 4781 return ret; 4782 else 4783 return 0; 4784 } 4785 4786 static int 4787 ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info) 4788 { 4789 struct ice_hw *hw; 4790 struct ice_vsi_ctx ctxt; 4791 uint8_t vlan_flags = 0; 4792 int ret; 4793 4794 if (!vsi || !info) { 4795 PMD_DRV_LOG(ERR, "invalid parameters"); 4796 return -EINVAL; 4797 } 4798 4799 if (info->on) { 4800 vsi->info.port_based_inner_vlan = info->config.pvid; 4801 /** 4802 * If insert pvid is enabled, only tagged pkts are 4803 * allowed to be sent out. 
4804 */ 4805 vlan_flags = ICE_AQ_VSI_INNER_VLAN_INSERT_PVID | 4806 ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED; 4807 } else { 4808 vsi->info.port_based_inner_vlan = 0; 4809 if (info->config.reject.tagged == 0) 4810 vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTTAGGED; 4811 4812 if (info->config.reject.untagged == 0) 4813 vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED; 4814 } 4815 vsi->info.inner_vlan_flags &= ~(ICE_AQ_VSI_INNER_VLAN_INSERT_PVID | 4816 ICE_AQ_VSI_INNER_VLAN_EMODE_M); 4817 vsi->info.inner_vlan_flags |= vlan_flags; 4818 memset(&ctxt, 0, sizeof(ctxt)); 4819 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 4820 ctxt.info.valid_sections = 4821 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID); 4822 ctxt.vsi_num = vsi->vsi_id; 4823 4824 hw = ICE_VSI_TO_HW(vsi); 4825 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); 4826 if (ret != ICE_SUCCESS) { 4827 PMD_DRV_LOG(ERR, 4828 "update VSI for VLAN insert failed, err %d", 4829 ret); 4830 return -EINVAL; 4831 } 4832 4833 vsi->info.valid_sections |= 4834 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID); 4835 4836 return ret; 4837 } 4838 4839 static int 4840 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on) 4841 { 4842 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4843 struct ice_vsi *vsi = pf->main_vsi; 4844 struct rte_eth_dev_data *data = pf->dev_data; 4845 struct ice_vsi_vlan_pvid_info info; 4846 int ret; 4847 4848 memset(&info, 0, sizeof(info)); 4849 info.on = on; 4850 if (info.on) { 4851 info.config.pvid = pvid; 4852 } else { 4853 info.config.reject.tagged = 4854 data->dev_conf.txmode.hw_vlan_reject_tagged; 4855 info.config.reject.untagged = 4856 data->dev_conf.txmode.hw_vlan_reject_untagged; 4857 } 4858 4859 ret = ice_vsi_vlan_pvid_set(vsi, &info); 4860 if (ret < 0) { 4861 PMD_DRV_LOG(ERR, "Failed to set pvid."); 4862 return -EINVAL; 4863 } 4864 4865 return 0; 4866 } 4867 4868 static int 4869 ice_get_eeprom_length(struct rte_eth_dev *dev) 4870 { 4871 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4872 4873 return hw->flash.flash_size; 4874 } 4875 4876 static int 4877 ice_get_eeprom(struct rte_eth_dev *dev, 4878 struct rte_dev_eeprom_info *eeprom) 4879 { 4880 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4881 enum ice_status status = ICE_SUCCESS; 4882 uint8_t *data = eeprom->data; 4883 4884 eeprom->magic = hw->vendor_id | (hw->device_id << 16); 4885 4886 status = ice_acquire_nvm(hw, ICE_RES_READ); 4887 if (status) { 4888 PMD_DRV_LOG(ERR, "acquire nvm failed."); 4889 return -EIO; 4890 } 4891 4892 status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->length, 4893 data, false); 4894 4895 ice_release_nvm(hw); 4896 4897 if (status) { 4898 PMD_DRV_LOG(ERR, "EEPROM read failed."); 4899 return -EIO; 4900 } 4901 4902 return 0; 4903 } 4904 4905 static void 4906 ice_stat_update_32(struct ice_hw *hw, 4907 uint32_t reg, 4908 bool offset_loaded, 4909 uint64_t *offset, 4910 uint64_t *stat) 4911 { 4912 uint64_t new_data; 4913 4914 new_data = (uint64_t)ICE_READ_REG(hw, reg); 4915 if (!offset_loaded) 4916 *offset = new_data; 4917 4918 if (new_data >= *offset) 4919 *stat = (uint64_t)(new_data - *offset); 4920 else 4921 *stat = (uint64_t)((new_data + 4922 ((uint64_t)1 << ICE_32_BIT_WIDTH)) 4923 - *offset); 4924 } 4925 4926 static void 4927 ice_stat_update_40(struct ice_hw *hw, 4928 uint32_t hireg, 4929 uint32_t loreg, 4930 bool offset_loaded, 4931 uint64_t *offset, 4932 uint64_t *stat) 4933 { 4934 uint64_t new_data; 4935 4936 new_data = 
(uint64_t)ICE_READ_REG(hw, loreg); 4937 new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) << 4938 ICE_32_BIT_WIDTH; 4939 4940 if (!offset_loaded) 4941 *offset = new_data; 4942 4943 if (new_data >= *offset) 4944 *stat = new_data - *offset; 4945 else 4946 *stat = (uint64_t)((new_data + 4947 ((uint64_t)1 << ICE_40_BIT_WIDTH)) - 4948 *offset); 4949 4950 *stat &= ICE_40_BIT_MASK; 4951 } 4952 4953 /* Get all the statistics of a VSI */ 4954 static void 4955 ice_update_vsi_stats(struct ice_vsi *vsi) 4956 { 4957 struct ice_eth_stats *oes = &vsi->eth_stats_offset; 4958 struct ice_eth_stats *nes = &vsi->eth_stats; 4959 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4960 int idx = rte_le_to_cpu_16(vsi->vsi_id); 4961 4962 ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx), 4963 vsi->offset_loaded, &oes->rx_bytes, 4964 &nes->rx_bytes); 4965 ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx), 4966 vsi->offset_loaded, &oes->rx_unicast, 4967 &nes->rx_unicast); 4968 ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx), 4969 vsi->offset_loaded, &oes->rx_multicast, 4970 &nes->rx_multicast); 4971 ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx), 4972 vsi->offset_loaded, &oes->rx_broadcast, 4973 &nes->rx_broadcast); 4974 /* enlarge the limitation when rx_bytes overflowed */ 4975 if (vsi->offset_loaded) { 4976 if (ICE_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes) 4977 nes->rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH; 4978 nes->rx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_rx_bytes); 4979 } 4980 vsi->old_rx_bytes = nes->rx_bytes; 4981 /* exclude CRC bytes */ 4982 nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast + 4983 nes->rx_broadcast) * RTE_ETHER_CRC_LEN; 4984 4985 ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded, 4986 &oes->rx_discards, &nes->rx_discards); 4987 /* GLV_REPC not supported */ 4988 /* GLV_RMPC not supported */ 4989 ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded, 4990 &oes->rx_unknown_protocol, 4991 &nes->rx_unknown_protocol); 4992 ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx), 4993 vsi->offset_loaded, &oes->tx_bytes, 4994 &nes->tx_bytes); 4995 ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx), 4996 vsi->offset_loaded, &oes->tx_unicast, 4997 &nes->tx_unicast); 4998 ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx), 4999 vsi->offset_loaded, &oes->tx_multicast, 5000 &nes->tx_multicast); 5001 ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx), 5002 vsi->offset_loaded, &oes->tx_broadcast, 5003 &nes->tx_broadcast); 5004 /* GLV_TDPC not supported */ 5005 ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded, 5006 &oes->tx_errors, &nes->tx_errors); 5007 /* enlarge the limitation when tx_bytes overflowed */ 5008 if (vsi->offset_loaded) { 5009 if (ICE_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes) 5010 nes->tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH; 5011 nes->tx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_tx_bytes); 5012 } 5013 vsi->old_tx_bytes = nes->tx_bytes; 5014 vsi->offset_loaded = true; 5015 5016 PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************", 5017 vsi->vsi_id); 5018 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes); 5019 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast); 5020 PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast); 5021 PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast); 5022 PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards); 5023 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"", 5024 nes->rx_unknown_protocol); 5025 PMD_DRV_LOG(DEBUG, 
"tx_bytes: %"PRIu64"", nes->tx_bytes); 5026 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast); 5027 PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast); 5028 PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast); 5029 PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards); 5030 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors); 5031 PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************", 5032 vsi->vsi_id); 5033 } 5034 5035 static void 5036 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw) 5037 { 5038 struct ice_hw_port_stats *ns = &pf->stats; /* new stats */ 5039 struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */ 5040 5041 /* Get statistics of struct ice_eth_stats */ 5042 ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport), 5043 GLPRT_GORCL(hw->port_info->lport), 5044 pf->offset_loaded, &os->eth.rx_bytes, 5045 &ns->eth.rx_bytes); 5046 ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport), 5047 GLPRT_UPRCL(hw->port_info->lport), 5048 pf->offset_loaded, &os->eth.rx_unicast, 5049 &ns->eth.rx_unicast); 5050 ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport), 5051 GLPRT_MPRCL(hw->port_info->lport), 5052 pf->offset_loaded, &os->eth.rx_multicast, 5053 &ns->eth.rx_multicast); 5054 ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport), 5055 GLPRT_BPRCL(hw->port_info->lport), 5056 pf->offset_loaded, &os->eth.rx_broadcast, 5057 &ns->eth.rx_broadcast); 5058 ice_stat_update_32(hw, PRTRPB_RDPC, 5059 pf->offset_loaded, &os->eth.rx_discards, 5060 &ns->eth.rx_discards); 5061 /* enlarge the limitation when rx_bytes overflowed */ 5062 if (pf->offset_loaded) { 5063 if (ICE_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes) 5064 ns->eth.rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH; 5065 ns->eth.rx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_rx_bytes); 5066 } 5067 pf->old_rx_bytes = ns->eth.rx_bytes; 5068 5069 /* Workaround: CRC size should not be included in byte statistics, 5070 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx 5071 * packet. 
5072 */ 5073 ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast + 5074 ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN; 5075 5076 /* GLPRT_REPC not supported */ 5077 /* GLPRT_RMPC not supported */ 5078 ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport), 5079 pf->offset_loaded, 5080 &os->eth.rx_unknown_protocol, 5081 &ns->eth.rx_unknown_protocol); 5082 ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport), 5083 GLPRT_GOTCL(hw->port_info->lport), 5084 pf->offset_loaded, &os->eth.tx_bytes, 5085 &ns->eth.tx_bytes); 5086 ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport), 5087 GLPRT_UPTCL(hw->port_info->lport), 5088 pf->offset_loaded, &os->eth.tx_unicast, 5089 &ns->eth.tx_unicast); 5090 ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport), 5091 GLPRT_MPTCL(hw->port_info->lport), 5092 pf->offset_loaded, &os->eth.tx_multicast, 5093 &ns->eth.tx_multicast); 5094 ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport), 5095 GLPRT_BPTCL(hw->port_info->lport), 5096 pf->offset_loaded, &os->eth.tx_broadcast, 5097 &ns->eth.tx_broadcast); 5098 /* enlarge the limitation when tx_bytes overflowed */ 5099 if (pf->offset_loaded) { 5100 if (ICE_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes) 5101 ns->eth.tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH; 5102 ns->eth.tx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_tx_bytes); 5103 } 5104 pf->old_tx_bytes = ns->eth.tx_bytes; 5105 ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast + 5106 ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN; 5107 5108 /* GLPRT_TEPC not supported */ 5109 5110 /* additional port specific stats */ 5111 ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport), 5112 pf->offset_loaded, &os->tx_dropped_link_down, 5113 &ns->tx_dropped_link_down); 5114 ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport), 5115 pf->offset_loaded, &os->crc_errors, 5116 &ns->crc_errors); 5117 ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport), 5118 pf->offset_loaded, &os->illegal_bytes, 5119 &ns->illegal_bytes); 5120 /* GLPRT_ERRBC not supported */ 5121 ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport), 5122 pf->offset_loaded, &os->mac_local_faults, 5123 &ns->mac_local_faults); 5124 ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport), 5125 pf->offset_loaded, &os->mac_remote_faults, 5126 &ns->mac_remote_faults); 5127 5128 ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport), 5129 pf->offset_loaded, &os->rx_len_errors, 5130 &ns->rx_len_errors); 5131 5132 ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport), 5133 pf->offset_loaded, &os->link_xon_rx, 5134 &ns->link_xon_rx); 5135 ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport), 5136 pf->offset_loaded, &os->link_xoff_rx, 5137 &ns->link_xoff_rx); 5138 ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport), 5139 pf->offset_loaded, &os->link_xon_tx, 5140 &ns->link_xon_tx); 5141 ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport), 5142 pf->offset_loaded, &os->link_xoff_tx, 5143 &ns->link_xoff_tx); 5144 ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport), 5145 GLPRT_PRC64L(hw->port_info->lport), 5146 pf->offset_loaded, &os->rx_size_64, 5147 &ns->rx_size_64); 5148 ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport), 5149 GLPRT_PRC127L(hw->port_info->lport), 5150 pf->offset_loaded, &os->rx_size_127, 5151 &ns->rx_size_127); 5152 ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport), 5153 GLPRT_PRC255L(hw->port_info->lport), 5154 pf->offset_loaded, &os->rx_size_255, 5155 &ns->rx_size_255); 5156 ice_stat_update_40(hw, 
GLPRT_PRC511H(hw->port_info->lport), 5157 GLPRT_PRC511L(hw->port_info->lport), 5158 pf->offset_loaded, &os->rx_size_511, 5159 &ns->rx_size_511); 5160 ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport), 5161 GLPRT_PRC1023L(hw->port_info->lport), 5162 pf->offset_loaded, &os->rx_size_1023, 5163 &ns->rx_size_1023); 5164 ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport), 5165 GLPRT_PRC1522L(hw->port_info->lport), 5166 pf->offset_loaded, &os->rx_size_1522, 5167 &ns->rx_size_1522); 5168 ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport), 5169 GLPRT_PRC9522L(hw->port_info->lport), 5170 pf->offset_loaded, &os->rx_size_big, 5171 &ns->rx_size_big); 5172 ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport), 5173 pf->offset_loaded, &os->rx_undersize, 5174 &ns->rx_undersize); 5175 ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport), 5176 pf->offset_loaded, &os->rx_fragments, 5177 &ns->rx_fragments); 5178 ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport), 5179 pf->offset_loaded, &os->rx_oversize, 5180 &ns->rx_oversize); 5181 ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport), 5182 pf->offset_loaded, &os->rx_jabber, 5183 &ns->rx_jabber); 5184 ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport), 5185 GLPRT_PTC64L(hw->port_info->lport), 5186 pf->offset_loaded, &os->tx_size_64, 5187 &ns->tx_size_64); 5188 ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport), 5189 GLPRT_PTC127L(hw->port_info->lport), 5190 pf->offset_loaded, &os->tx_size_127, 5191 &ns->tx_size_127); 5192 ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport), 5193 GLPRT_PTC255L(hw->port_info->lport), 5194 pf->offset_loaded, &os->tx_size_255, 5195 &ns->tx_size_255); 5196 ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport), 5197 GLPRT_PTC511L(hw->port_info->lport), 5198 pf->offset_loaded, &os->tx_size_511, 5199 &ns->tx_size_511); 5200 ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport), 5201 GLPRT_PTC1023L(hw->port_info->lport), 5202 pf->offset_loaded, &os->tx_size_1023, 5203 &ns->tx_size_1023); 5204 ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport), 5205 GLPRT_PTC1522L(hw->port_info->lport), 5206 pf->offset_loaded, &os->tx_size_1522, 5207 &ns->tx_size_1522); 5208 ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport), 5209 GLPRT_PTC9522L(hw->port_info->lport), 5210 pf->offset_loaded, &os->tx_size_big, 5211 &ns->tx_size_big); 5212 5213 /* GLPRT_MSPDC not supported */ 5214 /* GLPRT_XEC not supported */ 5215 5216 pf->offset_loaded = true; 5217 5218 if (pf->main_vsi) 5219 ice_update_vsi_stats(pf->main_vsi); 5220 } 5221 5222 /* Get all statistics of a port */ 5223 static int 5224 ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 5225 { 5226 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5227 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5228 struct ice_hw_port_stats *ns = &pf->stats; /* new stats */ 5229 5230 /* call read registers - updates values, now write them to struct */ 5231 ice_read_stats_registers(pf, hw); 5232 5233 stats->ipackets = pf->main_vsi->eth_stats.rx_unicast + 5234 pf->main_vsi->eth_stats.rx_multicast + 5235 pf->main_vsi->eth_stats.rx_broadcast - 5236 pf->main_vsi->eth_stats.rx_discards; 5237 stats->opackets = ns->eth.tx_unicast + 5238 ns->eth.tx_multicast + 5239 ns->eth.tx_broadcast; 5240 stats->ibytes = pf->main_vsi->eth_stats.rx_bytes; 5241 stats->obytes = ns->eth.tx_bytes; 5242 stats->oerrors = ns->eth.tx_errors + 5243 pf->main_vsi->eth_stats.tx_errors; 5244 5245 /* Rx Errors */ 
5246 stats->imissed = ns->eth.rx_discards + 5247 pf->main_vsi->eth_stats.rx_discards; 5248 stats->ierrors = ns->crc_errors + 5249 ns->rx_undersize + 5250 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber; 5251 5252 PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************"); 5253 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes); 5254 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast); 5255 PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast); 5256 PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast); 5257 PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards); 5258 PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"", 5259 pf->main_vsi->eth_stats.rx_discards); 5260 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"", 5261 ns->eth.rx_unknown_protocol); 5262 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes); 5263 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast); 5264 PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast); 5265 PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast); 5266 PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards); 5267 PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"", 5268 pf->main_vsi->eth_stats.tx_discards); 5269 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors); 5270 5271 PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"", 5272 ns->tx_dropped_link_down); 5273 PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors); 5274 PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"", 5275 ns->illegal_bytes); 5276 PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes); 5277 PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"", 5278 ns->mac_local_faults); 5279 PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"", 5280 ns->mac_remote_faults); 5281 PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx); 5282 PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx); 5283 PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx); 5284 PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx); 5285 PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64); 5286 PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127); 5287 PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255); 5288 PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511); 5289 PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023); 5290 PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522); 5291 PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big); 5292 PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize); 5293 PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments); 5294 PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize); 5295 PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber); 5296 PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64); 5297 PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127); 5298 PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255); 5299 PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511); 5300 PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023); 5301 PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522); 5302 PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big); 5303 PMD_DRV_LOG(DEBUG, "rx_len_errors: %"PRIu64"", ns->rx_len_errors); 5304 PMD_DRV_LOG(DEBUG, "************* PF stats end ****************"); 5305 return 0; 5306 } 5307 5308 /* Reset the statistics */ 5309 static int 5310 
ice_stats_reset(struct rte_eth_dev *dev) 5311 { 5312 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5313 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5314 5315 /* Mark PF and VSI stats to update the offset, aka "reset" */ 5316 pf->offset_loaded = false; 5317 if (pf->main_vsi) 5318 pf->main_vsi->offset_loaded = false; 5319 5320 /* read the stats, reading current register values into offset */ 5321 ice_read_stats_registers(pf, hw); 5322 5323 return 0; 5324 } 5325 5326 static uint32_t 5327 ice_xstats_calc_num(void) 5328 { 5329 uint32_t num; 5330 5331 num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS; 5332 5333 return num; 5334 } 5335 5336 static int 5337 ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 5338 unsigned int n) 5339 { 5340 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5341 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5342 unsigned int i; 5343 unsigned int count; 5344 struct ice_hw_port_stats *hw_stats = &pf->stats; 5345 5346 count = ice_xstats_calc_num(); 5347 if (n < count) 5348 return count; 5349 5350 ice_read_stats_registers(pf, hw); 5351 5352 if (!xstats) 5353 return 0; 5354 5355 count = 0; 5356 5357 /* Get stats from ice_eth_stats struct */ 5358 for (i = 0; i < ICE_NB_ETH_XSTATS; i++) { 5359 xstats[count].value = 5360 *(uint64_t *)((char *)&hw_stats->eth + 5361 ice_stats_strings[i].offset); 5362 xstats[count].id = count; 5363 count++; 5364 } 5365 5366 /* Get individiual stats from ice_hw_port struct */ 5367 for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) { 5368 xstats[count].value = 5369 *(uint64_t *)((char *)hw_stats + 5370 ice_hw_port_strings[i].offset); 5371 xstats[count].id = count; 5372 count++; 5373 } 5374 5375 return count; 5376 } 5377 5378 static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 5379 struct rte_eth_xstat_name *xstats_names, 5380 __rte_unused unsigned int limit) 5381 { 5382 unsigned int count = 0; 5383 unsigned int i; 5384 5385 if (!xstats_names) 5386 return ice_xstats_calc_num(); 5387 5388 /* Note: limit checked in rte_eth_xstats_names() */ 5389 5390 /* Get stats from ice_eth_stats struct */ 5391 for (i = 0; i < ICE_NB_ETH_XSTATS; i++) { 5392 strlcpy(xstats_names[count].name, ice_stats_strings[i].name, 5393 sizeof(xstats_names[count].name)); 5394 count++; 5395 } 5396 5397 /* Get individiual stats from ice_hw_port struct */ 5398 for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) { 5399 strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name, 5400 sizeof(xstats_names[count].name)); 5401 count++; 5402 } 5403 5404 return count; 5405 } 5406 5407 static int 5408 ice_dev_flow_ops_get(struct rte_eth_dev *dev, 5409 const struct rte_flow_ops **ops) 5410 { 5411 if (!dev) 5412 return -EINVAL; 5413 5414 *ops = &ice_flow_ops; 5415 return 0; 5416 } 5417 5418 /* Add UDP tunneling port */ 5419 static int 5420 ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 5421 struct rte_eth_udp_tunnel *udp_tunnel) 5422 { 5423 int ret = 0; 5424 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5425 5426 if (udp_tunnel == NULL) 5427 return -EINVAL; 5428 5429 switch (udp_tunnel->prot_type) { 5430 case RTE_TUNNEL_TYPE_VXLAN: 5431 ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port); 5432 break; 5433 default: 5434 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 5435 ret = -EINVAL; 5436 break; 5437 } 5438 5439 return ret; 5440 } 5441 5442 /* Delete UDP tunneling port */ 5443 static int 5444 ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 5445 struct 
rte_eth_udp_tunnel *udp_tunnel) 5446 { 5447 int ret = 0; 5448 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5449 5450 if (udp_tunnel == NULL) 5451 return -EINVAL; 5452 5453 switch (udp_tunnel->prot_type) { 5454 case RTE_TUNNEL_TYPE_VXLAN: 5455 ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0); 5456 break; 5457 default: 5458 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 5459 ret = -EINVAL; 5460 break; 5461 } 5462 5463 return ret; 5464 } 5465 5466 static int 5467 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 5468 struct rte_pci_device *pci_dev) 5469 { 5470 return rte_eth_dev_pci_generic_probe(pci_dev, 5471 sizeof(struct ice_adapter), 5472 ice_dev_init); 5473 } 5474 5475 static int 5476 ice_pci_remove(struct rte_pci_device *pci_dev) 5477 { 5478 return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit); 5479 } 5480 5481 static struct rte_pci_driver rte_ice_pmd = { 5482 .id_table = pci_id_ice_map, 5483 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 5484 .probe = ice_pci_probe, 5485 .remove = ice_pci_remove, 5486 }; 5487 5488 /** 5489 * Driver initialization routine. 5490 * Invoked once at EAL init time. 5491 * Register itself as the [Poll Mode] Driver of PCI devices. 5492 */ 5493 RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd); 5494 RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map); 5495 RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci"); 5496 RTE_PMD_REGISTER_PARAM_STRING(net_ice, 5497 ICE_HW_DEBUG_MASK_ARG "=0xXXX" 5498 ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset>" 5499 ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>" 5500 ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>"); 5501 5502 RTE_LOG_REGISTER_SUFFIX(ice_logtype_init, init, NOTICE); 5503 RTE_LOG_REGISTER_SUFFIX(ice_logtype_driver, driver, NOTICE); 5504 #ifdef RTE_ETHDEV_DEBUG_RX 5505 RTE_LOG_REGISTER_SUFFIX(ice_logtype_rx, rx, DEBUG); 5506 #endif 5507 #ifdef RTE_ETHDEV_DEBUG_TX 5508 RTE_LOG_REGISTER_SUFFIX(ice_logtype_tx, tx, DEBUG); 5509 #endif 5510