/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_string_fns.h>
#include <rte_ethdev_pci.h>

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "base/ice_sched.h"
#include "base/ice_flow.h"
#include "base/ice_dcb.h"
#include "base/ice_common.h"

#include "rte_pmd_ice.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

/* devargs */
#define ICE_SAFE_MODE_SUPPORT_ARG	"safe-mode-support"
#define ICE_PIPELINE_MODE_SUPPORT_ARG	"pipeline-mode-support"
#define ICE_PROTO_XTR_ARG		"proto_xtr"

static const char * const ice_valid_args[] = {
	ICE_SAFE_MODE_SUPPORT_ARG,
	ICE_PIPELINE_MODE_SUPPORT_ARG,
	ICE_PROTO_XTR_ARG,
	NULL
};

static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
	.name = "intel_pmd_dynfield_proto_xtr_metadata",
	.size = sizeof(uint32_t),
	.align = __alignof__(uint32_t),
	.flags = 0,
};

struct proto_xtr_ol_flag {
	const struct rte_mbuf_dynflag param;
	uint64_t *ol_flag;
	bool required;
};

static bool ice_proto_xtr_hw_support[PROTO_XTR_MAX];

static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = {
	[PROTO_XTR_VLAN] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_vlan_mask },
	[PROTO_XTR_IPV4] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv4_mask },
	[PROTO_XTR_IPV6] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_mask },
	[PROTO_XTR_IPV6_FLOW] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask },
	[PROTO_XTR_TCP] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_tcp_mask },
	[PROTO_XTR_IP_OFFSET] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ip_offset_mask },
};

#define ICE_DFLT_OUTER_TAG_TYPE	ICE_AQ_VSI_OUTER_TAG_VLAN_9100

#define ICE_OS_DEFAULT_PKG_NAME	"ICE OS Default Package"
#define ICE_COMMS_PKG_NAME	"ICE COMMS Package"
#define ICE_MAX_RES_DESC_NUM	1024

static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
static int ice_dev_stop(struct rte_eth_dev *dev);
static int ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static int ice_dev_info_get(struct rte_eth_dev *dev,
			    struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ice_dev_set_link_up(struct rte_eth_dev *dev);
static int ice_dev_set_link_down(struct rte_eth_dev *dev);

static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int ice_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
static int ice_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size);
static int ice_rss_hash_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_conf *rss_conf);
static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
static int ice_promisc_enable(struct rte_eth_dev *dev);
static int ice_promisc_disable(struct rte_eth_dev *dev);
static int ice_allmulti_enable(struct rte_eth_dev *dev);
static int ice_allmulti_disable(struct rte_eth_dev *dev);
static int ice_vlan_filter_set(struct rte_eth_dev *dev,
			       uint16_t vlan_id,
			       int on);
static int ice_macaddr_set(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mac_addr);
static int ice_macaddr_add(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mac_addr,
			   __rte_unused uint32_t index,
			   uint32_t pool);
static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id);
static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id);
static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);
static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
			     uint16_t pvid, int on);
static int ice_get_eeprom_length(struct rte_eth_dev *dev);
static int ice_get_eeprom(struct rte_eth_dev *dev,
			  struct rte_dev_eeprom_info *eeprom);
static int ice_stats_get(struct rte_eth_dev *dev,
			 struct rte_eth_stats *stats);
static int ice_stats_reset(struct rte_eth_dev *dev);
static int ice_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *xstats, unsigned int n);
static int ice_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int limit);
static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
			       enum rte_filter_type filter_type,
			       enum rte_filter_op filter_op,
			       void *arg);
static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
			struct rte_eth_udp_tunnel *udp_tunnel);
static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
			struct rte_eth_udp_tunnel *udp_tunnel);

static const struct rte_pci_id pci_id_ice_map[] = {
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops ice_eth_dev_ops = {
	.dev_configure                = ice_dev_configure,
	.dev_start                    = ice_dev_start,
	.dev_stop                     = ice_dev_stop,
	.dev_close                    = ice_dev_close,
	.dev_reset                    = ice_dev_reset,
	.dev_set_link_up              = ice_dev_set_link_up,
	.dev_set_link_down            = ice_dev_set_link_down,
	.rx_queue_start               = ice_rx_queue_start,
	.rx_queue_stop                = ice_rx_queue_stop,
	.tx_queue_start               = ice_tx_queue_start,
	.tx_queue_stop                = ice_tx_queue_stop,
	.rx_queue_setup               = ice_rx_queue_setup,
	.rx_queue_release             = ice_rx_queue_release,
	.tx_queue_setup               = ice_tx_queue_setup,
	.tx_queue_release             = ice_tx_queue_release,
	.dev_infos_get                = ice_dev_info_get,
	.dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
	.link_update                  = ice_link_update,
	.mtu_set                      = ice_mtu_set,
	.mac_addr_set                 = ice_macaddr_set,
	.mac_addr_add                 = ice_macaddr_add,
	.mac_addr_remove              = ice_macaddr_remove,
	.vlan_filter_set              = ice_vlan_filter_set,
	.vlan_offload_set             = ice_vlan_offload_set,
	.reta_update                  = ice_rss_reta_update,
	.reta_query                   = ice_rss_reta_query,
	.rss_hash_update              = ice_rss_hash_update,
	.rss_hash_conf_get            = ice_rss_hash_conf_get,
	.promiscuous_enable           = ice_promisc_enable,
	.promiscuous_disable          = ice_promisc_disable,
	.allmulticast_enable          = ice_allmulti_enable,
	.allmulticast_disable         = ice_allmulti_disable,
	.rx_queue_intr_enable         = ice_rx_queue_intr_enable,
	.rx_queue_intr_disable        = ice_rx_queue_intr_disable,
	.fw_version_get               = ice_fw_version_get,
	.vlan_pvid_set                = ice_vlan_pvid_set,
	.rxq_info_get                 = ice_rxq_info_get,
	.txq_info_get                 = ice_txq_info_get,
	.rx_burst_mode_get            = ice_rx_burst_mode_get,
	.tx_burst_mode_get            = ice_tx_burst_mode_get,
	.get_eeprom_length            = ice_get_eeprom_length,
	.get_eeprom                   = ice_get_eeprom,
	.stats_get                    = ice_stats_get,
	.stats_reset                  = ice_stats_reset,
	.xstats_get                   = ice_xstats_get,
	.xstats_get_names             = ice_xstats_get_names,
	.xstats_reset                 = ice_stats_reset,
	.filter_ctrl                  = ice_dev_filter_ctrl,
	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
	.tx_done_cleanup              = ice_tx_done_cleanup,
};

/* store statistics names and their offsets in the stats structure */
struct ice_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct ice_xstats_name_off ice_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
	{"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
	{"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)},
};

#define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
		sizeof(ice_stats_strings[0]))

static const struct ice_xstats_name_off ice_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct ice_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct ice_hw_port_stats,
		mac_remote_faults)},
	{"rx_len_errors", offsetof(struct ice_hw_port_stats,
		rx_len_errors)},
	{"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
		mac_short_pkt_dropped)},
	{"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		tx_size_big)},
};

#define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
		sizeof(ice_hw_port_strings[0]))

static void
ice_init_controlq_parameter(struct ice_hw *hw)
{
	/* fields for adminq */
	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;

	/* fields for mailboxq, DPDK used as PF host */
	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
}

static int
lookup_proto_xtr_type(const char *xtr_name)
{
	static struct {
		const char *name;
		enum proto_xtr_type type;
	} xtr_type_map[] = {
		{ "vlan",      PROTO_XTR_VLAN      },
		{ "ipv4",      PROTO_XTR_IPV4      },
		{ "ipv6",      PROTO_XTR_IPV6      },
		{ "ipv6_flow", PROTO_XTR_IPV6_FLOW },
		{ "tcp",       PROTO_XTR_TCP       },
		{ "ip_offset", PROTO_XTR_IP_OFFSET },
	};
	uint32_t i;

	for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
		if (strcmp(xtr_name, xtr_type_map[i].name) == 0)
			return xtr_type_map[i].type;
	}

	return -1;
}

/*
 * Parse elem, the elem could be single number/range or '(' ')' group
 * 1) A single number elem, it's just a simple digit. e.g. 9
 * 2) A single range elem, two digits with a '-' between. e.g. 2-6
 * 3) A group elem, combines multiple 1) or 2) with '( )'. e.g (0,2-4,6)
 * Within group elem, '-' used for a range separator;
 * ',' used for a single number.
 */
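/*
 * Illustrative example only: a devargs value such as
 *   proto_xtr='[(1,2-3,8):tcp,10-13:vlan]'
 * requests TCP field extraction on queues 1, 2-3 and 8 and VLAN extraction
 * on queues 10-13, while a plain value such as
 *   proto_xtr=ipv6
 * applies IPv6 extraction to all queues as the default.
 */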
static int
parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs)
{
	const char *str = input;
	char *end = NULL;
	uint32_t min, max;
	uint32_t idx;

	while (isblank(*str))
		str++;

	if (!isdigit(*str) && *str != '(')
		return -1;

	/* process single number or single range of number */
	if (*str != '(') {
		errno = 0;
		idx = strtoul(str, &end, 10);
		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
			return -1;

		while (isblank(*end))
			end++;

		min = idx;
		max = idx;

		/* process single <number>-<number> */
		if (*end == '-') {
			end++;
			while (isblank(*end))
				end++;
			if (!isdigit(*end))
				return -1;

			errno = 0;
			idx = strtoul(end, &end, 10);
			if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
				return -1;

			max = idx;
			while (isblank(*end))
				end++;
		}

		if (*end != ':')
			return -1;

		for (idx = RTE_MIN(min, max);
		     idx <= RTE_MAX(min, max); idx++)
			devargs->proto_xtr[idx] = xtr_type;

		return 0;
	}

	/* process set within bracket */
	str++;
	while (isblank(*str))
		str++;
	if (*str == '\0')
		return -1;

	min = ICE_MAX_QUEUE_NUM;
	do {
		/* go ahead to the first digit */
		while (isblank(*str))
			str++;
		if (!isdigit(*str))
			return -1;

		/* get the digit value */
		errno = 0;
		idx = strtoul(str, &end, 10);
		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
			return -1;

		/* go ahead to separator '-',',' and ')' */
		while (isblank(*end))
			end++;
		if (*end == '-') {
			if (min == ICE_MAX_QUEUE_NUM)
				min = idx;
			else /* avoid continuous '-' */
				return -1;
		} else if (*end == ',' || *end == ')') {
			max = idx;
			if (min == ICE_MAX_QUEUE_NUM)
				min = idx;

			for (idx = RTE_MIN(min, max);
			     idx <= RTE_MAX(min, max); idx++)
				devargs->proto_xtr[idx] = xtr_type;

			min = ICE_MAX_QUEUE_NUM;
		} else {
			return -1;
		}

		str = end + 1;
	} while (*end != ')' && *end != '\0');

	return 0;
}

static int
parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs)
{
	const char *queue_start;
	uint32_t idx;
	int xtr_type;
	char xtr_name[32];

	while (isblank(*queues))
		queues++;

	if (*queues != '[') {
		xtr_type = lookup_proto_xtr_type(queues);
		if (xtr_type < 0)
			return -1;

		devargs->proto_xtr_dflt = xtr_type;

		return 0;
	}

	queues++;
	do {
		while (isblank(*queues))
			queues++;
		if (*queues == '\0')
			return -1;

		queue_start = queues;

		/* go across a complete bracket */
		if (*queue_start == '(') {
			queues += strcspn(queues, ")");
			if (*queues != ')')
				return -1;
		}

		/* scan the separator ':' */
		queues += strcspn(queues, ":");
		if (*queues++ != ':')
			return -1;
		while (isblank(*queues))
			queues++;

		for (idx = 0; ; idx++) {
			if (isblank(queues[idx]) ||
			    queues[idx] == ',' ||
			    queues[idx] == ']' ||
			    queues[idx] == '\0')
				break;

			if (idx > sizeof(xtr_name) - 2)
				return -1;

			xtr_name[idx] = queues[idx];
		}
		xtr_name[idx] = '\0';
		xtr_type = lookup_proto_xtr_type(xtr_name);
		if (xtr_type < 0)
			return -1;

		queues += idx;

		while (isblank(*queues) || *queues == ',' || *queues == ']')
			queues++;

		if (parse_queue_set(queue_start, xtr_type, devargs) < 0)
			return -1;
	} while (*queues != '\0');

	return 0;
}

static int
handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
		     void *extra_args)
{
	struct ice_devargs *devargs = extra_args;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	if (parse_queue_proto_xtr(value, devargs) < 0) {
		PMD_DRV_LOG(ERR,
			    "The protocol extraction parameter is wrong : '%s'",
			    value);
		return -1;
	}

	return 0;
}

static void
ice_check_proto_xtr_support(struct ice_hw *hw)
{
#define FLX_REG(val, fld, idx) \
	(((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \
	 GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S)
	static struct {
		uint32_t rxdid;
		uint8_t opcode;
		uint8_t protid_0;
		uint8_t protid_1;
	} xtr_sets[] = {
		[PROTO_XTR_VLAN] = { ICE_RXDID_COMMS_AUX_VLAN,
				     ICE_RX_OPC_EXTRACT,
				     ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O},
		[PROTO_XTR_IPV4] = { ICE_RXDID_COMMS_AUX_IPV4,
				     ICE_RX_OPC_EXTRACT,
				     ICE_PROT_IPV4_OF_OR_S,
				     ICE_PROT_IPV4_OF_OR_S },
		[PROTO_XTR_IPV6] = { ICE_RXDID_COMMS_AUX_IPV6,
				     ICE_RX_OPC_EXTRACT,
				     ICE_PROT_IPV6_OF_OR_S,
				     ICE_PROT_IPV6_OF_OR_S },
		[PROTO_XTR_IPV6_FLOW] = { ICE_RXDID_COMMS_AUX_IPV6_FLOW,
					  ICE_RX_OPC_EXTRACT,
					  ICE_PROT_IPV6_OF_OR_S,
					  ICE_PROT_IPV6_OF_OR_S },
		[PROTO_XTR_TCP] = { ICE_RXDID_COMMS_AUX_TCP,
				    ICE_RX_OPC_EXTRACT,
				    ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL },
		[PROTO_XTR_IP_OFFSET] = { ICE_RXDID_COMMS_AUX_IP_OFFSET,
					  ICE_RX_OPC_PROTID,
					  ICE_PROT_IPV4_OF_OR_S,
					  ICE_PROT_IPV6_OF_OR_S },
	};
	uint32_t i;

	for (i = 0; i < RTE_DIM(xtr_sets); i++) {
		uint32_t rxdid = xtr_sets[i].rxdid;
		uint32_t v;

		if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) {
			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid));

			if (FLX_REG(v, PROT_MDID, 4) == xtr_sets[i].protid_0 &&
			    FLX_REG(v, RXDID_OPCODE, 4) == xtr_sets[i].opcode)
				ice_proto_xtr_hw_support[i] = true;
		}

		if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) {
			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid));

			if (FLX_REG(v, PROT_MDID, 5) == xtr_sets[i].protid_1 &&
			    FLX_REG(v, RXDID_OPCODE, 5) == xtr_sets[i].opcode)
				ice_proto_xtr_hw_support[i] = true;
		}
	}
}

static int
ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
		  uint32_t num)
{
	struct pool_entry *entry;

	if (!pool || !num)
		return -EINVAL;

	entry = rte_zmalloc(NULL, sizeof(*entry), 0);
	if (!entry) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for resource pool");
		return -ENOMEM;
	}

	/* queue heap initialize */
	pool->num_free = num;
	pool->num_alloc = 0;
	pool->base = base;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);

	/* Initialize element */
	entry->base = 0;
	entry->len = num;

	LIST_INSERT_HEAD(&pool->free_list, entry, next);
	return 0;
}

static int
ice_res_pool_alloc(struct ice_res_pool_info *pool,
		   uint16_t num)
{
	struct pool_entry *entry, *valid_entry;

	if (!pool || !num) {
		PMD_INIT_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}

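	/*
	 * The lookup below is a best-fit scan of the free list: an entry of
	 * exactly the requested length is taken as-is, otherwise the smallest
	 * entry that can still satisfy the request is split.
	 */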
	if (pool->num_free < num) {
		PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
			     num, pool->num_free);
		return -ENOMEM;
	}

	valid_entry = NULL;
	/* Lookup in free list and find most fit one */
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->len >= num) {
			/* Find best one */
			if (entry->len == num) {
				valid_entry = entry;
				break;
			}
			if (!valid_entry ||
			    valid_entry->len > entry->len)
				valid_entry = entry;
		}
	}

	/* No entry found to satisfy the request, return */
	if (!valid_entry) {
		PMD_INIT_LOG(ERR, "No valid entry found");
		return -ENOMEM;
	}
	/**
	 * The entry has exactly the requested number of queues,
	 * so remove it from the free list.
	 */
	if (valid_entry->len == num) {
		LIST_REMOVE(valid_entry, next);
	} else {
		/**
		 * The entry has more queues than requested, so create a new
		 * entry for the alloc list and reduce the base and length of
		 * the entry left in the free list.
		 */
		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
		if (!entry) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate memory for "
				     "resource pool");
			return -ENOMEM;
		}
		entry->base = valid_entry->base;
		entry->len = num;
		valid_entry->base += num;
		valid_entry->len -= num;
		valid_entry = entry;
	}

	/* Insert it into alloc list, not sorted */
	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

	pool->num_free -= valid_entry->len;
	pool->num_alloc += valid_entry->len;

	return valid_entry->base + pool->base;
}

static void
ice_res_pool_destroy(struct ice_res_pool_info *pool)
{
	struct pool_entry *entry, *next_entry;

	if (!pool)
		return;

	for (entry = LIST_FIRST(&pool->alloc_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	for (entry = LIST_FIRST(&pool->free_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	pool->num_free = 0;
	pool->num_alloc = 0;
	pool->base = 0;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);
}

static void
ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
{
	/* Set VSI LUT selection */
	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
	/* Set Hash scheme */
	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
	/* enable TC */
	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
}

static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
				struct ice_aqc_vsi_props *info,
				uint8_t enabled_tcmap)
{
	uint16_t bsf, qp_idx;

	/* default tc 0 now. Multi-TC support needs to be added later.
	 * Configure TC and queue mapping parameters; for an enabled TC,
	 * allocate qpnum_per_tc queues to this traffic.
	 */
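	/*
	 * Note: vsi->nb_qps is adjusted below to a power of two derived from
	 * its lowest set bit, e.g. nb_qps = 12 (0b1100) gives rte_bsf32() = 2,
	 * so 1 << 2 = 4 queue pairs end up mapped.
	 */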
	if (enabled_tcmap != 0x01) {
		PMD_INIT_LOG(ERR, "only TC0 is supported");
		return -ENOTSUP;
	}

	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
	bsf = rte_bsf32(vsi->nb_qps);
	/* Adjust the queue number to actual queues that can be applied */
	vsi->nb_qps = 0x1 << bsf;

	qp_idx = 0;
	/* Set tc and queue mapping with VSI */
	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
						ICE_AQ_VSI_TC_Q_OFFSET_S) |
					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

	/* Associate queue number with VSI */
	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
	info->valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	/* Set the info.ingress_table and info.egress_table
	 * for UP translate table. Now just set it to 1:1 map by default
	 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
	 */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
	info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	return 0;
}

static int
ice_init_mac_address(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!rte_is_unicast_ether_addr
		((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) {
		PMD_INIT_LOG(ERR, "Invalid MAC address");
		return -EINVAL;
	}

	rte_ether_addr_copy(
		(struct rte_ether_addr *)hw->port_info[0].mac.lan_addr,
		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr);

	dev->data->mac_addrs =
		rte_zmalloc(NULL, sizeof(struct rte_ether_addr), 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory to store mac address");
		return -ENOMEM;
	}
	/* store it to dev data */
	rte_ether_addr_copy(
		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr,
		&dev->data->mac_addrs[0]);
	return 0;
}

/* Find out specific MAC filter */
static struct ice_mac_filter *
ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr)
{
	struct ice_mac_filter *f;

	TAILQ_FOREACH(f, &vsi->mac_list, next) {
		if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
			return f;
	}

	return NULL;
}

static int
ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* If it's added and configured, return */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (f) {
		PMD_DRV_LOG(INFO, "This MAC filter already exists.");
		return 0;
	}

	INIT_LIST_HEAD(&list_head);

	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* Add the mac */
	ret = ice_add_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
		ret = -EINVAL;
		goto DONE;
	}
	/* Add the mac addr into mac list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	rte_ether_addr_copy(mac_addr, &f->mac_info.mac_addr);
	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
	vsi->mac_num++;

	ret = 0;

DONE:
	rte_free(m_list_itr);
	return ret;
}

static int
ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* Can't find it, return an error */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (!f)
		return -EINVAL;

	INIT_LIST_HEAD(&list_head);

	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* remove the mac filter */
	ret = ice_remove_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Remove the mac addr from mac list */
	TAILQ_REMOVE(&vsi->mac_list, f, next);
	rte_free(f);
	vsi->mac_num--;

	ret = 0;
DONE:
	rte_free(m_list_itr);
	return ret;
}

/* Find out specific VLAN filter */
static struct ice_vlan_filter *
ice_find_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_vlan_filter *f;

	TAILQ_FOREACH(f, &vsi->vlan_list, next) {
		if (vlan_id == f->vlan_info.vlan_id)
			return f;
	}

	return NULL;
}

static int
ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw;
	int ret = 0;

	if (!vsi || vlan_id > RTE_ETHER_MAX_VLAN_ID)
		return -EINVAL;

	hw = ICE_VSI_TO_HW(vsi);

	/* If it's added and configured, return. */
	f = ice_find_vlan_filter(vsi, vlan_id);
	if (f) {
		PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
		return 0;
	}

	if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
		return 0;

	INIT_LIST_HEAD(&list_head);

	v_list_itr = (struct ice_fltr_list_entry *)
		      ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* Add the vlan */
	ret = ice_add_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Add vlan into vlan list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	f->vlan_info.vlan_id = vlan_id;
	TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
	vsi->vlan_num++;

	ret = 0;

DONE:
	rte_free(v_list_itr);
	return ret;
}

static int
ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw;
	int ret = 0;

	/**
	 * Vlan 0 is the generic filter for untagged packets
	 * and can't be removed.
	 */
	if (!vsi || vlan_id == 0 || vlan_id > RTE_ETHER_MAX_VLAN_ID)
		return -EINVAL;

	hw = ICE_VSI_TO_HW(vsi);

	/* Can't find it, return an error */
	f = ice_find_vlan_filter(vsi, vlan_id);
	if (!f)
		return -EINVAL;

	INIT_LIST_HEAD(&list_head);

	v_list_itr = (struct ice_fltr_list_entry *)
		      ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}

	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* remove the vlan filter */
	ret = ice_remove_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Remove the vlan id from vlan list */
	TAILQ_REMOVE(&vsi->vlan_list, f, next);
	rte_free(f);
	vsi->vlan_num--;

	ret = 0;
DONE:
	rte_free(v_list_itr);
	return ret;
}

static int
ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
{
	struct ice_mac_filter *m_f;
	struct ice_vlan_filter *v_f;
	int ret = 0;

	if (!vsi || !vsi->mac_num)
		return -EINVAL;

	TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
		ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
		if (ret != ICE_SUCCESS) {
			ret = -EINVAL;
			goto DONE;
		}
	}

	if (vsi->vlan_num == 0)
		return 0;

	TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
		ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id);
		if (ret != ICE_SUCCESS) {
			ret = -EINVAL;
			goto DONE;
		}
	}

DONE:
	return ret;
}

static int
ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t qinq_flags;
	int ret = 0;

	/* Check if it is already on or off */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
		if (on) {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) ==
			    ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)
				return 0; /* already on */
		} else {
			if (!(vsi->info.outer_tag_flags &
			      ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST))
				return 0; /* already off */
		}
	}

	if (on)
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST;
	else
		qinq_flags = 0;
	/* clear global insertion and use per packet insertion */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT);
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST);
	vsi->info.outer_tag_flags |= qinq_flags;
	/* use default vlan type 0x8100 */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
	vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
				     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO,
			    "Update VSI failed to %s qinq stripping",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);

	return ret;
}

static int
ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t qinq_flags;
	int ret = 0;

	/* Check if it is already on or off */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
		if (on) {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
			    ICE_AQ_VSI_OUTER_TAG_COPY)
				return 0; /* already on */
		} else {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
			    ICE_AQ_VSI_OUTER_TAG_NOTHING)
				return 0; /* already off */
		}
	}

	if (on)
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY;
	else
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING;
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M);
	vsi->info.outer_tag_flags |= qinq_flags;
	/* use default vlan type 0x8100 */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
	vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
				     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO,
			    "Update VSI failed to %s qinq stripping",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);

	return ret;
}

static int
ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on)
{
	int ret;

	ret = ice_vsi_config_qinq_stripping(vsi, on);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to set qinq stripping - %d", ret);

	ret = ice_vsi_config_qinq_insertion(vsi, on);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to set qinq insertion - %d", ret);

	return ret;
}

/* Enable IRQ0 */
static void
ice_pf_enable_irq0(struct ice_hw *hw)
{
	/* reset the registers */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
	ICE_READ_REG(hw, PFINT_OICR);

#ifdef ICE_LSE_SPT
	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));

	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
		       PFINT_OICR_CTL_ITR_INDX_M) |
		      PFINT_OICR_CTL_CAUSE_ENA_M);

	ICE_WRITE_REG(hw, PFINT_FW_CTL,
		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
		       PFINT_FW_CTL_ITR_INDX_M) |
		      PFINT_FW_CTL_CAUSE_ENA_M);
#else
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
#endif

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
		      GLINT_DYN_CTL_INTENA_M |
		      GLINT_DYN_CTL_CLEARPBA_M |
		      GLINT_DYN_CTL_ITR_INDX_M);

	ice_flush(hw);
}

/* Disable IRQ0 */
static void
ice_pf_disable_irq0(struct ice_hw *hw)
{
	/* Disable all interrupt types */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
	ice_flush(hw);
}

#ifdef ICE_LSE_SPT
static void
ice_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_ctl_q_info *cq = &hw->adminq;
	struct ice_rq_event_info event;
	uint16_t pending, opcode;
	int ret;

	event.buf_len = ICE_AQ_MAX_BUF_LEN;
	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
	if (!event.msg_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate mem");
		return;
	}

	pending = 1;
	while (pending) {
		ret = ice_clean_rq_elem(hw, cq, &event, &pending);

		if (ret != ICE_SUCCESS) {
			PMD_DRV_LOG(INFO,
				    "Failed to read msg from AdminQ, "
				    "adminq_err: %u",
				    hw->adminq.sq_last_status);
			break;
		}
		opcode = rte_le_to_cpu_16(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			ret = ice_link_update(dev, 0);
			if (!ret)
				rte_eth_dev_callback_process
					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
				    opcode);
			break;
		}
	}
	rte_free(event.msg_buf);
}
#endif

/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ice_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t oicr;
	uint32_t reg;
	uint8_t pf_num;
	uint8_t event;
	uint16_t queue;
	int ret;
#ifdef ICE_LSE_SPT
	uint32_t int_fw_ctl;
#endif

	/* Disable interrupt */
	ice_pf_disable_irq0(hw);

	/* read out interrupt causes */
	oicr = ICE_READ_REG(hw, PFINT_OICR);
#ifdef ICE_LSE_SPT
	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
#endif

	/* No interrupt event indicated */
	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
		PMD_DRV_LOG(INFO, "No interrupt event");
		goto done;
	}

#ifdef ICE_LSE_SPT
	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
		ice_handle_aq_msg(dev);
	}
#else
	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
		PMD_DRV_LOG(INFO, "OICR: link state change event");
		ret = ice_link_update(dev, 0);
		if (!ret)
			rte_eth_dev_callback_process
				(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	}
#endif

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		PMD_DRV_LOG(WARNING, "OICR: MDD event");
		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
		if (reg & GL_MDET_TX_PQM_VALID_M) {
			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				 GL_MDET_TX_PQM_PF_NUM_S;
			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by PQM on TX queue %d PF# %d",
				    event, queue, pf_num);
		}

		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				 GL_MDET_TX_TCLAN_PF_NUM_S;
			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by TCLAN on TX queue %d PF# %d",
				    event, queue, pf_num);
		}
	}
done:
	/* Enable interrupt */
	ice_pf_enable_irq0(hw);
	rte_intr_ack(dev->intr_handle);
}

static void
ice_init_proto_xtr(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	const struct proto_xtr_ol_flag *ol_flag;
	bool proto_xtr_enable = false;
	int offset;
	uint16_t i;

	pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0);
	if (unlikely(pf->proto_xtr == NULL)) {
		PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table");
		return;
	}

	for (i = 0; i < pf->lan_nb_qps; i++) {
		pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ?
				   ad->devargs.proto_xtr[i] :
				   ad->devargs.proto_xtr_dflt;

		if (pf->proto_xtr[i] != PROTO_XTR_NONE) {
			uint8_t type = pf->proto_xtr[i];

			ice_proto_xtr_ol_flag_params[type].required = true;
			proto_xtr_enable = true;
		}
	}

	if (likely(!proto_xtr_enable))
		return;

	ice_check_proto_xtr_support(hw);

	offset = rte_mbuf_dynfield_register(&ice_proto_xtr_metadata_param);
	if (unlikely(offset == -1)) {
		PMD_DRV_LOG(ERR,
			    "Protocol extraction metadata is disabled in mbuf with error %d",
			    -rte_errno);
		return;
	}

	PMD_DRV_LOG(DEBUG,
		    "Protocol extraction metadata offset in mbuf is : %d",
		    offset);
	rte_net_ice_dynfield_proto_xtr_metadata_offs = offset;

	for (i = 0; i < RTE_DIM(ice_proto_xtr_ol_flag_params); i++) {
		ol_flag = &ice_proto_xtr_ol_flag_params[i];

		if (!ol_flag->required)
			continue;

		if (!ice_proto_xtr_hw_support[i]) {
			PMD_DRV_LOG(ERR,
				    "Protocol extraction type %u is not supported in hardware",
				    i);
			rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
			break;
		}

		offset = rte_mbuf_dynflag_register(&ol_flag->param);
		if (unlikely(offset == -1)) {
			PMD_DRV_LOG(ERR,
				    "Protocol extraction offload '%s' failed to register with error %d",
				    ol_flag->param.name, -rte_errno);

			rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
			break;
		}

		PMD_DRV_LOG(DEBUG,
			    "Protocol extraction offload '%s' offset in mbuf is : %d",
			    ol_flag->param.name, offset);
		*ol_flag->ol_flag = 1ULL << offset;
	}
}

/* Initialize SW parameters of PF */
static int
ice_pf_sw_init(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_PF_TO_HW(pf);

	pf->lan_nb_qp_max =
		(uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
				  hw->func_caps.common_cap.num_rxq);

	pf->lan_nb_qps = pf->lan_nb_qp_max;

	ice_init_proto_xtr(dev);

	if (hw->func_caps.fd_fltr_guar > 0 ||
	    hw->func_caps.fd_fltr_best_effort > 0) {
		pf->flags |= ICE_FLAG_FDIR;
		pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
		pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
	} else {
		pf->fdir_nb_qps = 0;
	}
	pf->fdir_qp_offset = 0;

	return 0;
}

struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = NULL;
	struct ice_vsi_ctx vsi_ctx;
	int ret;
	struct rte_ether_addr broadcast = {
		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
	struct rte_ether_addr mac_addr;
	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	uint8_t tc_bitmap = 0x1;
	uint16_t cfg;

	/* hw->num_lports = 1 in NIC mode */
	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
	if (!vsi)
		return NULL;

	vsi->idx = pf->next_vsi_idx;
	pf->next_vsi_idx++;
	vsi->type = type;
	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
	vsi->vlan_anti_spoof_on = 0;
	vsi->vlan_filter_on = 1;
	TAILQ_INIT(&vsi->mac_list);
	TAILQ_INIT(&vsi->vlan_list);

	/* Keep in sync with the ETH_RSS_RETA_SIZE_x maximum value definition */
	pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
			    ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
			    hw->func_caps.common_cap.rss_table_size;
	pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
	switch (type) {
	case ICE_VSI_PF:
		vsi->nb_qps = pf->lan_nb_qps;
		vsi->base_queue = 1;
		ice_vsi_config_default_rss(&vsi_ctx.info);
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
		/* switch_id is queried by get_switch_config aq, which is done
		 * by ice_init_hw
		 */
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		/* Allow all untagged or tagged packets */
		vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;

		/* FDIR */
		cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
			ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
		cfg = ICE_AQ_VSI_FD_ENABLE;
		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
		vsi_ctx.info.max_fd_fltr_dedicated =
			rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
		vsi_ctx.info.max_fd_fltr_shared =
			rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);

		/* Enable VLAN/UP trip */
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}

		break;
	case ICE_VSI_CTRL:
		vsi->nb_qps = pf->fdir_nb_qps;
		vsi->base_queue = ICE_FDIR_QUEUE_ID;
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;

		cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
		cfg = ICE_AQ_VSI_FD_PROG_ENABLE;
		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}
		break;
	default:
		/* for other types of VSI */
		PMD_INIT_LOG(ERR, "other types of VSI not supported");
		goto fail_mem;
	}

	/* VF has MSIX interrupt in VF range, don't allocate here */
	if (type == ICE_VSI_PF) {
		ret = ice_res_pool_alloc(&pf->msix_pool,
					 RTE_MIN(vsi->nb_qps,
						 RTE_MAX_RXTX_INTR_VEC_ID));
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
				     vsi->vsi_id, ret);
		}
		vsi->msix_intr = ret;
		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
	} else if (type == ICE_VSI_CTRL) {
		ret = ice_res_pool_alloc(&pf->msix_pool, 1);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
				    vsi->vsi_id, ret);
		}
		vsi->msix_intr = ret;
		vsi->nb_msix = 1;
	} else {
		vsi->msix_intr = 0;
		vsi->nb_msix = 0;
	}
	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
		goto fail_mem;
	}
	/* store VSI information in the SW structure */
	vsi->vsi_id = vsi_ctx.vsi_num;
	vsi->info = vsi_ctx.info;
	pf->vsis_allocated = vsi_ctx.vsis_allocd;
	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;

	if (type == ICE_VSI_PF) {
		/* MAC configuration */
		rte_ether_addr_copy((struct rte_ether_addr *)
					hw->port_info->mac.perm_addr,
				    &pf->dev_addr);

		rte_ether_addr_copy(&pf->dev_addr, &mac_addr);
		ret = ice_add_mac_filter(vsi, &mac_addr);
		if (ret != ICE_SUCCESS)
			PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");

		rte_ether_addr_copy(&broadcast, &mac_addr);
		ret = ice_add_mac_filter(vsi, &mac_addr);
		if (ret != ICE_SUCCESS)
			PMD_INIT_LOG(ERR, "Failed to add MAC filter");
	}

	/* At the beginning, only TC0. */
	/* What we need here is the maximum number of the TX queues.
	 * Currently vsi->nb_qps means it.
	 * Correct it if any change.
	 */
	max_txqs[0] = vsi->nb_qps;
	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
			      tc_bitmap, max_txqs);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to config vsi sched");

	return vsi;
fail_mem:
	rte_free(vsi);
	pf->next_vsi_idx--;
	return NULL;
}

static int
ice_send_driver_ver(struct ice_hw *hw)
{
	struct ice_driver_ver dv;

	/* we don't have a driver version, use 0 as a dummy */
	dv.major_ver = 0;
	dv.minor_ver = 0;
	dv.build_ver = 0;
	dv.subbuild_ver = 0;
	strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));

	return ice_aq_send_driver_ver(hw, &dv, NULL);
}

static int
ice_pf_setup(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	uint16_t unused;

	/* Clear all stats counters */
	pf->offset_loaded = false;
	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));

	/* force guaranteed filter pool for PF */
	ice_alloc_fd_guar_item(hw, &unused,
			       hw->func_caps.fd_fltr_guar);
	/* force shared filter pool for PF */
	ice_alloc_fd_shrd_item(hw, &unused,
			       hw->func_caps.fd_fltr_best_effort);

	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
	if (!vsi) {
		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
		return -EINVAL;
	}

	pf->main_vsi = vsi;

	return 0;
}

/*
 * Extract device serial number from PCIe Configuration Space and
 * determine the pkg file path according to the DSN.
 */
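/*
 * For example, a DSN whose upper and lower 32 bits read back as 0x12345678
 * and 0x9abcdef0 yields the file name "ice-123456789abcdef0.pkg", looked up
 * first in the updates search path and then in the default search path
 * before falling back to the generic package files. (Illustrative values.)
 */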
static int
ice_pkg_file_search_path(struct rte_pci_device *pci_dev, char *pkg_file)
{
	off_t pos;
	char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE];
	uint32_t dsn_low, dsn_high;
	memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE);

	pos = rte_pci_find_ext_capability(pci_dev, RTE_PCI_EXT_CAP_ID_DSN);

	if (pos) {
		rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4);
		rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8);
		snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE,
			 "ice-%08x%08x.pkg", dsn_high, dsn_low);
	} else {
		PMD_INIT_LOG(ERR, "Failed to read device serial number\n");
		goto fail_dsn;
	}

	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES,
		ICE_MAX_PKG_FILENAME_SIZE);
	if (!access(strcat(pkg_file, opt_ddp_filename), 0))
		return 0;

	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT,
		ICE_MAX_PKG_FILENAME_SIZE);
	if (!access(strcat(pkg_file, opt_ddp_filename), 0))
		return 0;

fail_dsn:
	strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE);
	if (!access(pkg_file, 0))
		return 0;
	strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE);
	return 0;
}

enum ice_pkg_type
ice_load_pkg_type(struct ice_hw *hw)
{
	enum ice_pkg_type package_type;

	/* store the activated package type (OS default or Comms) */
	if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME,
		     ICE_PKG_NAME_SIZE))
		package_type = ICE_PKG_TYPE_OS_DEFAULT;
	else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME,
			  ICE_PKG_NAME_SIZE))
		package_type = ICE_PKG_TYPE_COMMS;
	else
		package_type = ICE_PKG_TYPE_UNKNOWN;

	PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s",
		     hw->active_pkg_ver.major, hw->active_pkg_ver.minor,
		     hw->active_pkg_ver.update, hw->active_pkg_ver.draft,
		     hw->active_pkg_name);

	return package_type;
}

static int ice_load_pkg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	char pkg_file[ICE_MAX_PKG_FILENAME_SIZE];
	int err;
	uint8_t *buf;
	int buf_len;
	FILE *file;
	struct stat fstat;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	ice_pkg_file_search_path(pci_dev, pkg_file);

	file = fopen(pkg_file, "rb");
	if (!file) {
		PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
		return -1;
	}

	err = stat(pkg_file, &fstat);
	if (err) {
		PMD_INIT_LOG(ERR, "failed to get file stats\n");
		fclose(file);
		return err;
	}

	buf_len = fstat.st_size;
	buf = rte_malloc(NULL, buf_len, 0);

	if (!buf) {
		PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
			     buf_len);
		fclose(file);
		return -1;
	}

	err = fread(buf, buf_len, 1, file);
	if (err != 1) {
		PMD_INIT_LOG(ERR, "failed to read package data\n");
		fclose(file);
		err = -1;
		goto fail_exit;
	}

	fclose(file);

	err = ice_copy_and_init_pkg(hw, buf, buf_len);
	if (err) {
		PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d\n", err);
		goto fail_exit;
	}

	/* store the loaded pkg type info */
	ad->active_pkg_type = ice_load_pkg_type(hw);

	err = ice_init_hw_tbls(hw);
1879 if (err) { 1880 PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err); 1881 goto fail_init_tbls; 1882 } 1883 1884 return 0; 1885 1886 fail_init_tbls: 1887 rte_free(hw->pkg_copy); 1888 fail_exit: 1889 rte_free(buf); 1890 return err; 1891 } 1892 1893 static void 1894 ice_base_queue_get(struct ice_pf *pf) 1895 { 1896 uint32_t reg; 1897 struct ice_hw *hw = ICE_PF_TO_HW(pf); 1898 1899 reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC); 1900 if (reg & PFLAN_RX_QALLOC_VALID_M) { 1901 pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M; 1902 } else { 1903 PMD_INIT_LOG(WARNING, "Failed to get Rx base queue" 1904 " index"); 1905 } 1906 } 1907 1908 static int 1909 parse_bool(const char *key, const char *value, void *args) 1910 { 1911 int *i = (int *)args; 1912 char *end; 1913 int num; 1914 1915 num = strtoul(value, &end, 10); 1916 1917 if (num != 0 && num != 1) { 1918 PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", " 1919 "value must be 0 or 1", 1920 value, key); 1921 return -1; 1922 } 1923 1924 *i = num; 1925 return 0; 1926 } 1927 1928 static int ice_parse_devargs(struct rte_eth_dev *dev) 1929 { 1930 struct ice_adapter *ad = 1931 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 1932 struct rte_devargs *devargs = dev->device->devargs; 1933 struct rte_kvargs *kvlist; 1934 int ret; 1935 1936 if (devargs == NULL) 1937 return 0; 1938 1939 kvlist = rte_kvargs_parse(devargs->args, ice_valid_args); 1940 if (kvlist == NULL) { 1941 PMD_INIT_LOG(ERR, "Invalid kvargs key\n"); 1942 return -EINVAL; 1943 } 1944 1945 ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE; 1946 memset(ad->devargs.proto_xtr, PROTO_XTR_NONE, 1947 sizeof(ad->devargs.proto_xtr)); 1948 1949 ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG, 1950 &handle_proto_xtr_arg, &ad->devargs); 1951 if (ret) 1952 goto bail; 1953 1954 ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG, 1955 &parse_bool, &ad->devargs.safe_mode_support); 1956 if (ret) 1957 goto bail; 1958 1959 ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG, 1960 &parse_bool, &ad->devargs.pipe_mode_support); 1961 if (ret) 1962 goto bail; 1963 1964 bail: 1965 rte_kvargs_free(kvlist); 1966 return ret; 1967 } 1968 1969 /* Forward LLDP packets to default VSI by set switch rules */ 1970 static int 1971 ice_vsi_config_sw_lldp(struct ice_vsi *vsi, bool on) 1972 { 1973 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 1974 struct ice_fltr_list_entry *s_list_itr = NULL; 1975 struct LIST_HEAD_TYPE list_head; 1976 int ret = 0; 1977 1978 INIT_LIST_HEAD(&list_head); 1979 1980 s_list_itr = (struct ice_fltr_list_entry *) 1981 ice_malloc(hw, sizeof(*s_list_itr)); 1982 if (!s_list_itr) 1983 return -ENOMEM; 1984 s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE; 1985 s_list_itr->fltr_info.vsi_handle = vsi->idx; 1986 s_list_itr->fltr_info.l_data.ethertype_mac.ethertype = 1987 RTE_ETHER_TYPE_LLDP; 1988 s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI; 1989 s_list_itr->fltr_info.flag = ICE_FLTR_RX; 1990 s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT; 1991 LIST_ADD(&s_list_itr->list_entry, &list_head); 1992 if (on) 1993 ret = ice_add_eth_mac(hw, &list_head); 1994 else 1995 ret = ice_remove_eth_mac(hw, &list_head); 1996 1997 rte_free(s_list_itr); 1998 return ret; 1999 } 2000 2001 static enum ice_status 2002 ice_get_hw_res(struct ice_hw *hw, uint16_t res_type, 2003 uint16_t num, uint16_t desc_id, 2004 uint16_t *prof_buf, uint16_t *num_prof) 2005 { 2006 struct ice_aqc_res_elem *resp_buf; 2007 int ret; 2008 uint16_t buf_len; 2009 bool res_shared = 1; 2010 struct ice_aq_desc aq_desc; 2011 struct 
ice_sq_cd *cd = NULL;
	struct ice_aqc_get_allocd_res_desc *cmd =
		&aq_desc.params.get_res_desc;

	buf_len = sizeof(*resp_buf) * num;
	resp_buf = ice_malloc(hw, buf_len);
	if (!resp_buf)
		return -ENOMEM;

	ice_fill_dflt_direct_cmd_desc(&aq_desc,
				      ice_aqc_opc_get_allocd_res_desc);

	cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
					ICE_AQC_RES_TYPE_M) | (res_shared ?
					ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
	cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id);

	ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd);
	if (!ret)
		*num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc);
	else
		goto exit;

	ice_memcpy(prof_buf, resp_buf, sizeof(*resp_buf) *
		   (*num_prof), ICE_NONDMA_TO_NONDMA);

exit:
	rte_free(resp_buf);
	return ret;
}

static int
ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type)
{
	int ret;
	uint16_t prof_id;
	uint16_t prof_buf[ICE_MAX_RES_DESC_NUM];
	uint16_t first_desc = 1;
	uint16_t num_prof = 0;

	ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM,
			     first_desc, prof_buf, &num_prof);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get fxp resource");
		return ret;
	}

	for (prof_id = 0; prof_id < num_prof; prof_id++) {
		ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to free fxp resource");
			return ret;
		}
	}
	return 0;
}

static int
ice_reset_fxp_resource(struct ice_hw *hw)
{
	int ret;

	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to clean up fdir resource");
		return ret;
	}

	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to clean up rss resource");
		return ret;
	}

	return 0;
}

static void
ice_rss_ctx_init(struct ice_pf *pf)
{
	memset(&pf->hash_ctx, 0, sizeof(pf->hash_ctx));
}

static uint64_t
ice_get_supported_rxdid(struct ice_hw *hw)
{
	uint64_t supported_rxdid = 0; /* bitmap for supported RXDID */
	uint32_t regval;
	int i;

	supported_rxdid |= BIT(ICE_RXDID_LEGACY_1);

	for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
		regval = ICE_READ_REG(hw, GLFLXP_RXDID_FLAGS(i, 0));
		if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
			& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
			supported_rxdid |= BIT(i);
	}
	return supported_rxdid;
}

static int
ice_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_vsi *vsi;
	int ret;

	dev->dev_ops = &ice_eth_dev_ops;
	dev->rx_queue_count = ice_rx_queue_count;
	dev->rx_descriptor_status = ice_rx_descriptor_status;
	dev->tx_descriptor_status = ice_tx_descriptor_status;
	dev->rx_pkt_burst = ice_recv_pkts;
	dev->tx_pkt_burst = ice_xmit_pkts;
	dev->tx_pkt_prepare = ice_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this
work. 2133 */ 2134 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2135 ice_set_rx_function(dev); 2136 ice_set_tx_function(dev); 2137 return 0; 2138 } 2139 2140 dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 2141 2142 ice_set_default_ptype_table(dev); 2143 pci_dev = RTE_DEV_TO_PCI(dev->device); 2144 intr_handle = &pci_dev->intr_handle; 2145 2146 pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 2147 pf->adapter->eth_dev = dev; 2148 pf->dev_data = dev->data; 2149 hw->back = pf->adapter; 2150 hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr; 2151 hw->vendor_id = pci_dev->id.vendor_id; 2152 hw->device_id = pci_dev->id.device_id; 2153 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; 2154 hw->subsystem_device_id = pci_dev->id.subsystem_device_id; 2155 hw->bus.device = pci_dev->addr.devid; 2156 hw->bus.func = pci_dev->addr.function; 2157 2158 ret = ice_parse_devargs(dev); 2159 if (ret) { 2160 PMD_INIT_LOG(ERR, "Failed to parse devargs"); 2161 return -EINVAL; 2162 } 2163 2164 ice_init_controlq_parameter(hw); 2165 2166 ret = ice_init_hw(hw); 2167 if (ret) { 2168 PMD_INIT_LOG(ERR, "Failed to initialize HW"); 2169 return -EINVAL; 2170 } 2171 2172 ret = ice_load_pkg(dev); 2173 if (ret) { 2174 if (ad->devargs.safe_mode_support == 0) { 2175 PMD_INIT_LOG(ERR, "Failed to load the DDP package," 2176 "Use safe-mode-support=1 to enter Safe Mode"); 2177 return ret; 2178 } 2179 2180 PMD_INIT_LOG(WARNING, "Failed to load the DDP package," 2181 "Entering Safe Mode"); 2182 ad->is_safe_mode = 1; 2183 } 2184 2185 PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d", 2186 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build, 2187 hw->api_maj_ver, hw->api_min_ver); 2188 2189 ice_pf_sw_init(dev); 2190 ret = ice_init_mac_address(dev); 2191 if (ret) { 2192 PMD_INIT_LOG(ERR, "Failed to initialize mac address"); 2193 goto err_init_mac; 2194 } 2195 2196 ret = ice_res_pool_init(&pf->msix_pool, 1, 2197 hw->func_caps.common_cap.num_msix_vectors - 1); 2198 if (ret) { 2199 PMD_INIT_LOG(ERR, "Failed to init MSIX pool"); 2200 goto err_msix_pool_init; 2201 } 2202 2203 ret = ice_pf_setup(pf); 2204 if (ret) { 2205 PMD_INIT_LOG(ERR, "Failed to setup PF"); 2206 goto err_pf_setup; 2207 } 2208 2209 ret = ice_send_driver_ver(hw); 2210 if (ret) { 2211 PMD_INIT_LOG(ERR, "Failed to send driver version"); 2212 goto err_pf_setup; 2213 } 2214 2215 vsi = pf->main_vsi; 2216 2217 /* Disable double vlan by default */ 2218 ice_vsi_config_double_vlan(vsi, false); 2219 2220 ret = ice_aq_stop_lldp(hw, true, false, NULL); 2221 if (ret != ICE_SUCCESS) 2222 PMD_INIT_LOG(DEBUG, "lldp has already stopped\n"); 2223 ret = ice_init_dcb(hw, true); 2224 if (ret != ICE_SUCCESS) 2225 PMD_INIT_LOG(DEBUG, "Failed to init DCB\n"); 2226 /* Forward LLDP packets to default VSI */ 2227 ret = ice_vsi_config_sw_lldp(vsi, true); 2228 if (ret != ICE_SUCCESS) 2229 PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n"); 2230 /* register callback func to eal lib */ 2231 rte_intr_callback_register(intr_handle, 2232 ice_interrupt_handler, dev); 2233 2234 ice_pf_enable_irq0(hw); 2235 2236 /* enable uio intr after callback register */ 2237 rte_intr_enable(intr_handle); 2238 2239 /* get base queue pairs index in the device */ 2240 ice_base_queue_get(pf); 2241 2242 /* Initialize RSS context for gtpu_eh */ 2243 ice_rss_ctx_init(pf); 2244 2245 if (!ad->is_safe_mode) { 2246 ret = ice_flow_init(ad); 2247 if (ret) { 2248 PMD_INIT_LOG(ERR, "Failed to initialize flow"); 2249 return ret; 2250 } 2251 } 2252 2253 ret = ice_reset_fxp_resource(hw); 2254 if (ret) { 2255 
PMD_INIT_LOG(ERR, "Failed to reset fxp resource"); 2256 return ret; 2257 } 2258 2259 pf->supported_rxdid = ice_get_supported_rxdid(hw); 2260 2261 return 0; 2262 2263 err_pf_setup: 2264 ice_res_pool_destroy(&pf->msix_pool); 2265 err_msix_pool_init: 2266 rte_free(dev->data->mac_addrs); 2267 dev->data->mac_addrs = NULL; 2268 err_init_mac: 2269 ice_sched_cleanup_all(hw); 2270 rte_free(hw->port_info); 2271 ice_shutdown_all_ctrlq(hw); 2272 rte_free(pf->proto_xtr); 2273 2274 return ret; 2275 } 2276 2277 int 2278 ice_release_vsi(struct ice_vsi *vsi) 2279 { 2280 struct ice_hw *hw; 2281 struct ice_vsi_ctx vsi_ctx; 2282 enum ice_status ret; 2283 int error = 0; 2284 2285 if (!vsi) 2286 return error; 2287 2288 hw = ICE_VSI_TO_HW(vsi); 2289 2290 ice_remove_all_mac_vlan_filters(vsi); 2291 2292 memset(&vsi_ctx, 0, sizeof(vsi_ctx)); 2293 2294 vsi_ctx.vsi_num = vsi->vsi_id; 2295 vsi_ctx.info = vsi->info; 2296 ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL); 2297 if (ret != ICE_SUCCESS) { 2298 PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id); 2299 error = -1; 2300 } 2301 2302 rte_free(vsi->rss_lut); 2303 rte_free(vsi->rss_key); 2304 rte_free(vsi); 2305 return error; 2306 } 2307 2308 void 2309 ice_vsi_disable_queues_intr(struct ice_vsi *vsi) 2310 { 2311 struct rte_eth_dev *dev = vsi->adapter->eth_dev; 2312 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); 2313 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2314 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 2315 uint16_t msix_intr, i; 2316 2317 /* disable interrupt and also clear all the exist config */ 2318 for (i = 0; i < vsi->nb_qps; i++) { 2319 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0); 2320 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0); 2321 rte_wmb(); 2322 } 2323 2324 if (rte_intr_allow_others(intr_handle)) 2325 /* vfio-pci */ 2326 for (i = 0; i < vsi->nb_msix; i++) { 2327 msix_intr = vsi->msix_intr + i; 2328 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), 2329 GLINT_DYN_CTL_WB_ON_ITR_M); 2330 } 2331 else 2332 /* igb_uio */ 2333 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M); 2334 } 2335 2336 static int 2337 ice_dev_stop(struct rte_eth_dev *dev) 2338 { 2339 struct rte_eth_dev_data *data = dev->data; 2340 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 2341 struct ice_vsi *main_vsi = pf->main_vsi; 2342 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); 2343 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2344 uint16_t i; 2345 2346 /* avoid stopping again */ 2347 if (pf->adapter_stopped) 2348 return 0; 2349 2350 /* stop and clear all Rx queues */ 2351 for (i = 0; i < data->nb_rx_queues; i++) 2352 ice_rx_queue_stop(dev, i); 2353 2354 /* stop and clear all Tx queues */ 2355 for (i = 0; i < data->nb_tx_queues; i++) 2356 ice_tx_queue_stop(dev, i); 2357 2358 /* disable all queue interrupts */ 2359 ice_vsi_disable_queues_intr(main_vsi); 2360 2361 if (pf->init_link_up) 2362 ice_dev_set_link_up(dev); 2363 else 2364 ice_dev_set_link_down(dev); 2365 2366 /* Clean datapath event and queue/vec mapping */ 2367 rte_intr_efd_disable(intr_handle); 2368 if (intr_handle->intr_vec) { 2369 rte_free(intr_handle->intr_vec); 2370 intr_handle->intr_vec = NULL; 2371 } 2372 2373 pf->adapter_stopped = true; 2374 dev->data->dev_started = 0; 2375 2376 return 0; 2377 } 2378 2379 static int 2380 ice_dev_close(struct rte_eth_dev *dev) 2381 { 2382 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 2383 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2384 
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2385 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2386 struct ice_adapter *ad = 2387 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 2388 int ret; 2389 2390 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2391 return 0; 2392 2393 /* Since stop will make link down, then the link event will be 2394 * triggered, disable the irq firstly to avoid the port_infoe etc 2395 * resources deallocation causing the interrupt service thread 2396 * crash. 2397 */ 2398 ice_pf_disable_irq0(hw); 2399 2400 ret = ice_dev_stop(dev); 2401 2402 if (!ad->is_safe_mode) 2403 ice_flow_uninit(ad); 2404 2405 /* release all queue resource */ 2406 ice_free_queues(dev); 2407 2408 ice_res_pool_destroy(&pf->msix_pool); 2409 ice_release_vsi(pf->main_vsi); 2410 ice_sched_cleanup_all(hw); 2411 ice_free_hw_tbls(hw); 2412 rte_free(hw->port_info); 2413 hw->port_info = NULL; 2414 ice_shutdown_all_ctrlq(hw); 2415 rte_free(pf->proto_xtr); 2416 pf->proto_xtr = NULL; 2417 2418 /* disable uio intr before callback unregister */ 2419 rte_intr_disable(intr_handle); 2420 2421 /* unregister callback func from eal lib */ 2422 rte_intr_callback_unregister(intr_handle, 2423 ice_interrupt_handler, dev); 2424 2425 return ret; 2426 } 2427 2428 static int 2429 ice_dev_uninit(struct rte_eth_dev *dev) 2430 { 2431 ice_dev_close(dev); 2432 2433 return 0; 2434 } 2435 2436 static bool 2437 is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg) 2438 { 2439 return (cfg->hash_flds != 0 && cfg->addl_hdrs != 0) ? true : false; 2440 } 2441 2442 static void 2443 hash_cfg_reset(struct ice_rss_hash_cfg *cfg) 2444 { 2445 cfg->hash_flds = 0; 2446 cfg->addl_hdrs = 0; 2447 cfg->symm = 0; 2448 cfg->hdr_type = ICE_RSS_ANY_HEADERS; 2449 } 2450 2451 static int 2452 ice_hash_moveout(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg) 2453 { 2454 enum ice_status status = ICE_SUCCESS; 2455 struct ice_hw *hw = ICE_PF_TO_HW(pf); 2456 struct ice_vsi *vsi = pf->main_vsi; 2457 2458 if (!is_hash_cfg_valid(cfg)) 2459 return -ENOENT; 2460 2461 status = ice_rem_rss_cfg(hw, vsi->idx, cfg); 2462 if (status && status != ICE_ERR_DOES_NOT_EXIST) { 2463 PMD_DRV_LOG(ERR, 2464 "ice_rem_rss_cfg failed for VSI:%d, error:%d\n", 2465 vsi->idx, status); 2466 return -EBUSY; 2467 } 2468 2469 return 0; 2470 } 2471 2472 static int 2473 ice_hash_moveback(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg) 2474 { 2475 enum ice_status status = ICE_SUCCESS; 2476 struct ice_hw *hw = ICE_PF_TO_HW(pf); 2477 struct ice_vsi *vsi = pf->main_vsi; 2478 2479 if (!is_hash_cfg_valid(cfg)) 2480 return -ENOENT; 2481 2482 status = ice_add_rss_cfg(hw, vsi->idx, cfg); 2483 if (status) { 2484 PMD_DRV_LOG(ERR, 2485 "ice_add_rss_cfg failed for VSI:%d, error:%d\n", 2486 vsi->idx, status); 2487 return -EBUSY; 2488 } 2489 2490 return 0; 2491 } 2492 2493 static int 2494 ice_hash_remove(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg) 2495 { 2496 int ret; 2497 2498 ret = ice_hash_moveout(pf, cfg); 2499 if (ret && (ret != -ENOENT)) 2500 return ret; 2501 2502 hash_cfg_reset(cfg); 2503 2504 return 0; 2505 } 2506 2507 static int 2508 ice_add_rss_cfg_pre_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx, 2509 u8 ctx_idx) 2510 { 2511 int ret; 2512 2513 switch (ctx_idx) { 2514 case ICE_HASH_GTPU_CTX_EH_IP: 2515 ret = ice_hash_remove(pf, 2516 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); 2517 if (ret && (ret != -ENOENT)) 2518 return ret; 2519 2520 ret = ice_hash_remove(pf, 2521 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); 2522 if (ret && (ret != -ENOENT)) 2523 return ret; 
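		/* A GTPU_EH rule matches the extension header for both
		 * uplink and downlink traffic, so any narrower UP/DW hash
		 * contexts programmed earlier are removed below before the
		 * new rule takes effect.
		 */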
2524 2525 ret = ice_hash_remove(pf, 2526 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]); 2527 if (ret && (ret != -ENOENT)) 2528 return ret; 2529 2530 ret = ice_hash_remove(pf, 2531 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]); 2532 if (ret && (ret != -ENOENT)) 2533 return ret; 2534 2535 ret = ice_hash_remove(pf, 2536 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]); 2537 if (ret && (ret != -ENOENT)) 2538 return ret; 2539 2540 ret = ice_hash_remove(pf, 2541 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]); 2542 if (ret && (ret != -ENOENT)) 2543 return ret; 2544 2545 ret = ice_hash_remove(pf, 2546 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]); 2547 if (ret && (ret != -ENOENT)) 2548 return ret; 2549 2550 ret = ice_hash_remove(pf, 2551 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]); 2552 if (ret && (ret != -ENOENT)) 2553 return ret; 2554 2555 break; 2556 case ICE_HASH_GTPU_CTX_EH_IP_UDP: 2557 ret = ice_hash_remove(pf, 2558 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]); 2559 if (ret && (ret != -ENOENT)) 2560 return ret; 2561 2562 ret = ice_hash_remove(pf, 2563 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]); 2564 if (ret && (ret != -ENOENT)) 2565 return ret; 2566 2567 ret = ice_hash_moveout(pf, 2568 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]); 2569 if (ret && (ret != -ENOENT)) 2570 return ret; 2571 2572 ret = ice_hash_moveout(pf, 2573 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]); 2574 if (ret && (ret != -ENOENT)) 2575 return ret; 2576 2577 ret = ice_hash_moveout(pf, 2578 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]); 2579 if (ret && (ret != -ENOENT)) 2580 return ret; 2581 2582 ret = ice_hash_moveout(pf, 2583 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]); 2584 if (ret && (ret != -ENOENT)) 2585 return ret; 2586 2587 break; 2588 case ICE_HASH_GTPU_CTX_EH_IP_TCP: 2589 ret = ice_hash_remove(pf, 2590 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]); 2591 if (ret && (ret != -ENOENT)) 2592 return ret; 2593 2594 ret = ice_hash_remove(pf, 2595 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]); 2596 if (ret && (ret != -ENOENT)) 2597 return ret; 2598 2599 ret = ice_hash_moveout(pf, 2600 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]); 2601 if (ret && (ret != -ENOENT)) 2602 return ret; 2603 2604 ret = ice_hash_moveout(pf, 2605 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]); 2606 if (ret && (ret != -ENOENT)) 2607 return ret; 2608 2609 ret = ice_hash_moveout(pf, 2610 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]); 2611 if (ret && (ret != -ENOENT)) 2612 return ret; 2613 2614 ret = ice_hash_moveout(pf, 2615 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]); 2616 if (ret && (ret != -ENOENT)) 2617 return ret; 2618 2619 break; 2620 case ICE_HASH_GTPU_CTX_UP_IP: 2621 ret = ice_hash_remove(pf, 2622 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]); 2623 if (ret && (ret != -ENOENT)) 2624 return ret; 2625 2626 ret = ice_hash_remove(pf, 2627 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]); 2628 if (ret && (ret != -ENOENT)) 2629 return ret; 2630 2631 ret = ice_hash_moveout(pf, 2632 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]); 2633 if (ret && (ret != -ENOENT)) 2634 return ret; 2635 2636 ret = ice_hash_moveout(pf, 2637 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); 2638 if (ret && (ret != -ENOENT)) 2639 return ret; 2640 2641 ret = ice_hash_moveout(pf, 2642 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); 2643 if (ret && (ret != -ENOENT)) 2644 return ret; 2645 2646 break; 2647 case ICE_HASH_GTPU_CTX_UP_IP_UDP: 2648 case ICE_HASH_GTPU_CTX_UP_IP_TCP: 2649 ret = ice_hash_moveout(pf, 2650 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]); 2651 if (ret && (ret != -ENOENT)) 2652 return ret; 2653 2654 ret = ice_hash_moveout(pf, 2655 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); 2656 if (ret && (ret != -ENOENT)) 2657 return ret; 2658 2659 
ret = ice_hash_moveout(pf, 2660 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); 2661 if (ret && (ret != -ENOENT)) 2662 return ret; 2663 2664 break; 2665 case ICE_HASH_GTPU_CTX_DW_IP: 2666 ret = ice_hash_remove(pf, 2667 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]); 2668 if (ret && (ret != -ENOENT)) 2669 return ret; 2670 2671 ret = ice_hash_remove(pf, 2672 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]); 2673 if (ret && (ret != -ENOENT)) 2674 return ret; 2675 2676 ret = ice_hash_moveout(pf, 2677 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]); 2678 if (ret && (ret != -ENOENT)) 2679 return ret; 2680 2681 ret = ice_hash_moveout(pf, 2682 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); 2683 if (ret && (ret != -ENOENT)) 2684 return ret; 2685 2686 ret = ice_hash_moveout(pf, 2687 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); 2688 if (ret && (ret != -ENOENT)) 2689 return ret; 2690 2691 break; 2692 case ICE_HASH_GTPU_CTX_DW_IP_UDP: 2693 case ICE_HASH_GTPU_CTX_DW_IP_TCP: 2694 ret = ice_hash_moveout(pf, 2695 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]); 2696 if (ret && (ret != -ENOENT)) 2697 return ret; 2698 2699 ret = ice_hash_moveout(pf, 2700 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); 2701 if (ret && (ret != -ENOENT)) 2702 return ret; 2703 2704 ret = ice_hash_moveout(pf, 2705 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); 2706 if (ret && (ret != -ENOENT)) 2707 return ret; 2708 2709 break; 2710 default: 2711 break; 2712 } 2713 2714 return 0; 2715 } 2716 2717 static u8 calc_gtpu_ctx_idx(uint32_t hdr) 2718 { 2719 u8 eh_idx, ip_idx; 2720 2721 if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) 2722 eh_idx = 0; 2723 else if (hdr & ICE_FLOW_SEG_HDR_GTPU_UP) 2724 eh_idx = 1; 2725 else if (hdr & ICE_FLOW_SEG_HDR_GTPU_DWN) 2726 eh_idx = 2; 2727 else 2728 return ICE_HASH_GTPU_CTX_MAX; 2729 2730 ip_idx = 0; 2731 if (hdr & ICE_FLOW_SEG_HDR_UDP) 2732 ip_idx = 1; 2733 else if (hdr & ICE_FLOW_SEG_HDR_TCP) 2734 ip_idx = 2; 2735 2736 if (hdr & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)) 2737 return eh_idx * 3 + ip_idx; 2738 else 2739 return ICE_HASH_GTPU_CTX_MAX; 2740 } 2741 2742 static int 2743 ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr) 2744 { 2745 u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr); 2746 2747 if (hdr & ICE_FLOW_SEG_HDR_IPV4) 2748 return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu4, 2749 gtpu_ctx_idx); 2750 else if (hdr & ICE_FLOW_SEG_HDR_IPV6) 2751 return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu6, 2752 gtpu_ctx_idx); 2753 2754 return 0; 2755 } 2756 2757 static int 2758 ice_add_rss_cfg_post_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx, 2759 u8 ctx_idx, struct ice_rss_hash_cfg *cfg) 2760 { 2761 int ret; 2762 2763 if (ctx_idx < ICE_HASH_GTPU_CTX_MAX) 2764 ctx->ctx[ctx_idx] = *cfg; 2765 2766 switch (ctx_idx) { 2767 case ICE_HASH_GTPU_CTX_EH_IP: 2768 break; 2769 case ICE_HASH_GTPU_CTX_EH_IP_UDP: 2770 ret = ice_hash_moveback(pf, 2771 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]); 2772 if (ret && (ret != -ENOENT)) 2773 return ret; 2774 2775 ret = ice_hash_moveback(pf, 2776 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]); 2777 if (ret && (ret != -ENOENT)) 2778 return ret; 2779 2780 ret = ice_hash_moveback(pf, 2781 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]); 2782 if (ret && (ret != -ENOENT)) 2783 return ret; 2784 2785 ret = ice_hash_moveback(pf, 2786 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]); 2787 if (ret && (ret != -ENOENT)) 2788 return ret; 2789 2790 break; 2791 case ICE_HASH_GTPU_CTX_EH_IP_TCP: 2792 ret = ice_hash_moveback(pf, 2793 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]); 2794 if (ret && (ret != -ENOENT)) 2795 return ret; 2796 2797 ret = ice_hash_moveback(pf, 2798 
&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]); 2799 if (ret && (ret != -ENOENT)) 2800 return ret; 2801 2802 ret = ice_hash_moveback(pf, 2803 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]); 2804 if (ret && (ret != -ENOENT)) 2805 return ret; 2806 2807 ret = ice_hash_moveback(pf, 2808 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]); 2809 if (ret && (ret != -ENOENT)) 2810 return ret; 2811 2812 break; 2813 case ICE_HASH_GTPU_CTX_UP_IP: 2814 case ICE_HASH_GTPU_CTX_UP_IP_UDP: 2815 case ICE_HASH_GTPU_CTX_UP_IP_TCP: 2816 case ICE_HASH_GTPU_CTX_DW_IP: 2817 case ICE_HASH_GTPU_CTX_DW_IP_UDP: 2818 case ICE_HASH_GTPU_CTX_DW_IP_TCP: 2819 ret = ice_hash_moveback(pf, 2820 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]); 2821 if (ret && (ret != -ENOENT)) 2822 return ret; 2823 2824 ret = ice_hash_moveback(pf, 2825 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); 2826 if (ret && (ret != -ENOENT)) 2827 return ret; 2828 2829 ret = ice_hash_moveback(pf, 2830 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); 2831 if (ret && (ret != -ENOENT)) 2832 return ret; 2833 2834 break; 2835 default: 2836 break; 2837 } 2838 2839 return 0; 2840 } 2841 2842 static int 2843 ice_add_rss_cfg_post(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg) 2844 { 2845 u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(cfg->addl_hdrs); 2846 2847 if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) 2848 return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu4, 2849 gtpu_ctx_idx, cfg); 2850 else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) 2851 return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu6, 2852 gtpu_ctx_idx, cfg); 2853 2854 return 0; 2855 } 2856 2857 static void 2858 ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr) 2859 { 2860 u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr); 2861 2862 if (gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX) 2863 return; 2864 2865 if (hdr & ICE_FLOW_SEG_HDR_IPV4) 2866 hash_cfg_reset(&pf->hash_ctx.gtpu4.ctx[gtpu_ctx_idx]); 2867 else if (hdr & ICE_FLOW_SEG_HDR_IPV6) 2868 hash_cfg_reset(&pf->hash_ctx.gtpu6.ctx[gtpu_ctx_idx]); 2869 } 2870 2871 int 2872 ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, 2873 struct ice_rss_hash_cfg *cfg) 2874 { 2875 struct ice_hw *hw = ICE_PF_TO_HW(pf); 2876 int ret; 2877 2878 ret = ice_rem_rss_cfg(hw, vsi_id, cfg); 2879 if (ret && ret != ICE_ERR_DOES_NOT_EXIST) 2880 PMD_DRV_LOG(ERR, "remove rss cfg failed\n"); 2881 2882 ice_rem_rss_cfg_post(pf, cfg->addl_hdrs); 2883 2884 return 0; 2885 } 2886 2887 int 2888 ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, 2889 struct ice_rss_hash_cfg *cfg) 2890 { 2891 struct ice_hw *hw = ICE_PF_TO_HW(pf); 2892 int ret; 2893 2894 ret = ice_add_rss_cfg_pre(pf, cfg->addl_hdrs); 2895 if (ret) 2896 PMD_DRV_LOG(ERR, "add rss cfg pre failed\n"); 2897 2898 ret = ice_add_rss_cfg(hw, vsi_id, cfg); 2899 if (ret) 2900 PMD_DRV_LOG(ERR, "add rss cfg failed\n"); 2901 2902 ret = ice_add_rss_cfg_post(pf, cfg); 2903 if (ret) 2904 PMD_DRV_LOG(ERR, "add rss cfg post failed\n"); 2905 2906 return 0; 2907 } 2908 2909 static void 2910 ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf) 2911 { 2912 struct ice_hw *hw = ICE_PF_TO_HW(pf); 2913 struct ice_vsi *vsi = pf->main_vsi; 2914 struct ice_rss_hash_cfg cfg; 2915 int ret; 2916 2917 #define ICE_RSS_HF_ALL ( \ 2918 ETH_RSS_IPV4 | \ 2919 ETH_RSS_IPV6 | \ 2920 ETH_RSS_NONFRAG_IPV4_UDP | \ 2921 ETH_RSS_NONFRAG_IPV6_UDP | \ 2922 ETH_RSS_NONFRAG_IPV4_TCP | \ 2923 ETH_RSS_NONFRAG_IPV6_TCP | \ 2924 ETH_RSS_NONFRAG_IPV4_SCTP | \ 2925 ETH_RSS_NONFRAG_IPV6_SCTP) 2926 2927 ret = ice_rem_vsi_rss_cfg(hw, vsi->idx); 2928 if (ret) 2929 PMD_DRV_LOG(ERR, "%s Remove rss vsi fail %d", 2930 __func__, ret); 2931 
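	/* Each ETH_RSS_* bit set in rss_hf is expanded below into one or more
	 * ice RSS profiles: a plain IPv4/IPv6 (and L4) profile plus, where
	 * applicable, GTPU, GTPU_EH and PPPoE variants sharing the same
	 * input set.
	 */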
2932 cfg.symm = 0; 2933 cfg.hdr_type = ICE_RSS_ANY_HEADERS; 2934 /* Configure RSS for IPv4 with src/dst addr as input set */ 2935 if (rss_hf & ETH_RSS_IPV4) { 2936 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER; 2937 cfg.hash_flds = ICE_FLOW_HASH_IPV4; 2938 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 2939 if (ret) 2940 PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d", 2941 __func__, ret); 2942 } 2943 2944 /* Configure RSS for IPv6 with src/dst addr as input set */ 2945 if (rss_hf & ETH_RSS_IPV6) { 2946 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER; 2947 cfg.hash_flds = ICE_FLOW_HASH_IPV6; 2948 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 2949 if (ret) 2950 PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d", 2951 __func__, ret); 2952 } 2953 2954 /* Configure RSS for udp4 with src/dst addr and port as input set */ 2955 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { 2956 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 | 2957 ICE_FLOW_SEG_HDR_IPV_OTHER; 2958 cfg.hash_flds = ICE_HASH_UDP_IPV4; 2959 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 2960 if (ret) 2961 PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d", 2962 __func__, ret); 2963 } 2964 2965 /* Configure RSS for udp6 with src/dst addr and port as input set */ 2966 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) { 2967 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 | 2968 ICE_FLOW_SEG_HDR_IPV_OTHER; 2969 cfg.hash_flds = ICE_HASH_UDP_IPV6; 2970 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 2971 if (ret) 2972 PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d", 2973 __func__, ret); 2974 } 2975 2976 /* Configure RSS for tcp4 with src/dst addr and port as input set */ 2977 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { 2978 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 | 2979 ICE_FLOW_SEG_HDR_IPV_OTHER; 2980 cfg.hash_flds = ICE_HASH_TCP_IPV4; 2981 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 2982 if (ret) 2983 PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d", 2984 __func__, ret); 2985 } 2986 2987 /* Configure RSS for tcp6 with src/dst addr and port as input set */ 2988 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) { 2989 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 | 2990 ICE_FLOW_SEG_HDR_IPV_OTHER; 2991 cfg.hash_flds = ICE_HASH_TCP_IPV6; 2992 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 2993 if (ret) 2994 PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d", 2995 __func__, ret); 2996 } 2997 2998 /* Configure RSS for sctp4 with src/dst addr and port as input set */ 2999 if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) { 3000 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 | 3001 ICE_FLOW_SEG_HDR_IPV_OTHER; 3002 cfg.hash_flds = ICE_HASH_SCTP_IPV4; 3003 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3004 if (ret) 3005 PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d", 3006 __func__, ret); 3007 } 3008 3009 /* Configure RSS for sctp6 with src/dst addr and port as input set */ 3010 if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) { 3011 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 | 3012 ICE_FLOW_SEG_HDR_IPV_OTHER; 3013 cfg.hash_flds = ICE_HASH_SCTP_IPV6; 3014 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3015 if (ret) 3016 PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d", 3017 __func__, ret); 3018 } 3019 3020 if (rss_hf & ETH_RSS_IPV4) { 3021 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4 | 3022 ICE_FLOW_SEG_HDR_IPV_OTHER; 3023 cfg.hash_flds = ICE_FLOW_HASH_IPV4; 3024 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3025 if (ret) 3026 PMD_DRV_LOG(ERR, 
"%s GTPU_IPV4 rss flow fail %d", 3027 __func__, ret); 3028 3029 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4 | 3030 ICE_FLOW_SEG_HDR_IPV_OTHER; 3031 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3032 if (ret) 3033 PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4 rss flow fail %d", 3034 __func__, ret); 3035 3036 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 | 3037 ICE_FLOW_SEG_HDR_IPV_OTHER; 3038 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3039 if (ret) 3040 PMD_DRV_LOG(ERR, "%s PPPoE_IPV4 rss flow fail %d", 3041 __func__, ret); 3042 } 3043 3044 if (rss_hf & ETH_RSS_IPV6) { 3045 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6 | 3046 ICE_FLOW_SEG_HDR_IPV_OTHER; 3047 cfg.hash_flds = ICE_FLOW_HASH_IPV6; 3048 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3049 if (ret) 3050 PMD_DRV_LOG(ERR, "%s GTPU_IPV6 rss flow fail %d", 3051 __func__, ret); 3052 3053 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6 | 3054 ICE_FLOW_SEG_HDR_IPV_OTHER; 3055 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3056 if (ret) 3057 PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d", 3058 __func__, ret); 3059 3060 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 | 3061 ICE_FLOW_SEG_HDR_IPV_OTHER; 3062 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3063 if (ret) 3064 PMD_DRV_LOG(ERR, "%s PPPoE_IPV6 rss flow fail %d", 3065 __func__, ret); 3066 } 3067 3068 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { 3069 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_UDP | 3070 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER; 3071 cfg.hash_flds = ICE_HASH_UDP_IPV4; 3072 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3073 if (ret) 3074 PMD_DRV_LOG(ERR, "%s GTPU_IPV4_UDP rss flow fail %d", 3075 __func__, ret); 3076 3077 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_UDP | 3078 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER; 3079 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3080 if (ret) 3081 PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d", 3082 __func__, ret); 3083 3084 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP | 3085 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER; 3086 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3087 if (ret) 3088 PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow fail %d", 3089 __func__, ret); 3090 } 3091 3092 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) { 3093 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_UDP | 3094 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER; 3095 cfg.hash_flds = ICE_HASH_UDP_IPV6; 3096 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3097 if (ret) 3098 PMD_DRV_LOG(ERR, "%s GTPU_IPV6_UDP rss flow fail %d", 3099 __func__, ret); 3100 3101 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_UDP | 3102 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER; 3103 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3104 if (ret) 3105 PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d", 3106 __func__, ret); 3107 3108 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP | 3109 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER; 3110 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3111 if (ret) 3112 PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d", 3113 __func__, ret); 3114 } 3115 3116 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { 3117 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_TCP | 3118 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER; 3119 cfg.hash_flds = ICE_HASH_TCP_IPV4; 3120 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_IPV4_TCP rss flow fail %d",
				    __func__, ret);

		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_TCP |
				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d",
				    __func__, ret);

		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
		if (ret)
			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d",
				    __func__, ret);
	}

	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_TCP |
				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
		cfg.hash_flds = ICE_HASH_TCP_IPV6;
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_IPV6_TCP rss flow fail %d",
				    __func__, ret);

		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_TCP |
				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d",
				    __func__, ret);

		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
		if (ret)
			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d",
				    __func__, ret);
	}

	pf->rss_hf = rss_hf & ICE_RSS_HF_ALL;
}

static int ice_init_rss(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct ice_aq_get_set_rss_lut_params lut_params;
	struct rte_eth_rss_conf *rss_conf;
	struct ice_aqc_get_set_rss_keys key;
	uint16_t i, nb_q;
	int ret = 0;
	bool is_safe_mode = pf->adapter->is_safe_mode;
	uint32_t reg;

	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = dev->data->nb_rx_queues;
	vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
	vsi->rss_lut_size = pf->hash_lut_size;

	if (is_safe_mode) {
		PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
		return 0;
	}

	if (!vsi->rss_key) {
		vsi->rss_key = rte_zmalloc(NULL,
					   vsi->rss_key_size, 0);
		if (vsi->rss_key == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
			return -ENOMEM;
		}
	}
	if (!vsi->rss_lut) {
		vsi->rss_lut = rte_zmalloc(NULL,
					   vsi->rss_lut_size, 0);
		if (vsi->rss_lut == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_lut");
			rte_free(vsi->rss_key);
			vsi->rss_key = NULL;
			return -ENOMEM;
		}
	}
	/* configure RSS key */
	if (!rss_conf->rss_key) {
		/* Calculate the default hash key */
		for (i = 0; i < vsi->rss_key_size; i++)
			vsi->rss_key[i] = (uint8_t)rte_rand();
	} else {
		rte_memcpy(vsi->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   vsi->rss_key_size));
	}
	rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
	if (ret)
		goto out;

	/* init RSS LUT table */
	for (i = 0; i < vsi->rss_lut_size; i++)
		vsi->rss_lut[i] = i % nb_q;

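	/* The loop above fills the default LUT round-robin across the
	 * configured Rx queues; e.g. with 4 Rx queues (hypothetical), LUT
	 * entries 0, 4, 8, ... steer to queue 0, entries 1, 5, 9, ... to
	 * queue 1, and so on.
	 */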
lut_params.vsi_handle = vsi->idx; 3228 lut_params.lut_size = vsi->rss_lut_size; 3229 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; 3230 lut_params.lut = vsi->rss_lut; 3231 lut_params.global_lut_id = 0; 3232 ret = ice_aq_set_rss_lut(hw, &lut_params); 3233 if (ret) 3234 goto out; 3235 3236 /* Enable registers for symmetric_toeplitz function. */ 3237 reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id)); 3238 reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) | 3239 (1 << VSIQF_HASH_CTL_HASH_SCHEME_S); 3240 ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg); 3241 3242 /* RSS hash configuration */ 3243 ice_rss_hash_set(pf, rss_conf->rss_hf); 3244 3245 return 0; 3246 out: 3247 rte_free(vsi->rss_key); 3248 vsi->rss_key = NULL; 3249 rte_free(vsi->rss_lut); 3250 vsi->rss_lut = NULL; 3251 return -EINVAL; 3252 } 3253 3254 static int 3255 ice_dev_configure(struct rte_eth_dev *dev) 3256 { 3257 struct ice_adapter *ad = 3258 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 3259 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 3260 int ret; 3261 3262 /* Initialize to TRUE. If any of Rx queues doesn't meet the 3263 * bulk allocation or vector Rx preconditions we will reset it. 3264 */ 3265 ad->rx_bulk_alloc_allowed = true; 3266 ad->tx_simple_allowed = true; 3267 3268 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 3269 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; 3270 3271 ret = ice_init_rss(pf); 3272 if (ret) { 3273 PMD_DRV_LOG(ERR, "Failed to enable rss for PF"); 3274 return ret; 3275 } 3276 3277 return 0; 3278 } 3279 3280 static void 3281 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect, 3282 int base_queue, int nb_queue) 3283 { 3284 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 3285 uint32_t val, val_tx; 3286 int i; 3287 3288 for (i = 0; i < nb_queue; i++) { 3289 /*do actual bind*/ 3290 val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) | 3291 (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M; 3292 val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) | 3293 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M; 3294 3295 PMD_DRV_LOG(INFO, "queue %d is binding to vect %d", 3296 base_queue + i, msix_vect); 3297 /* set ITR0 value */ 3298 ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x2); 3299 ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val); 3300 ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx); 3301 } 3302 } 3303 3304 void 3305 ice_vsi_queues_bind_intr(struct ice_vsi *vsi) 3306 { 3307 struct rte_eth_dev *dev = vsi->adapter->eth_dev; 3308 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); 3309 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 3310 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 3311 uint16_t msix_vect = vsi->msix_intr; 3312 uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd); 3313 uint16_t queue_idx = 0; 3314 int record = 0; 3315 int i; 3316 3317 /* clear Rx/Tx queue interrupt */ 3318 for (i = 0; i < vsi->nb_used_qps; i++) { 3319 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0); 3320 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0); 3321 } 3322 3323 /* PF bind interrupt */ 3324 if (rte_intr_dp_is_en(intr_handle)) { 3325 queue_idx = 0; 3326 record = 1; 3327 } 3328 3329 for (i = 0; i < vsi->nb_used_qps; i++) { 3330 if (nb_msix <= 1) { 3331 if (!rte_intr_allow_others(intr_handle)) 3332 msix_vect = ICE_MISC_VEC_ID; 3333 3334 /* uio mapping all queue to one msix_vect */ 3335 __vsi_queues_bind_intr(vsi, msix_vect, 3336 vsi->base_queue + i, 3337 vsi->nb_used_qps - i); 3338 3339 for (; !!record && i < 
vsi->nb_used_qps; i++) 3340 intr_handle->intr_vec[queue_idx + i] = 3341 msix_vect; 3342 break; 3343 } 3344 3345 /* vfio 1:1 queue/msix_vect mapping */ 3346 __vsi_queues_bind_intr(vsi, msix_vect, 3347 vsi->base_queue + i, 1); 3348 3349 if (!!record) 3350 intr_handle->intr_vec[queue_idx + i] = msix_vect; 3351 3352 msix_vect++; 3353 nb_msix--; 3354 } 3355 } 3356 3357 void 3358 ice_vsi_enable_queues_intr(struct ice_vsi *vsi) 3359 { 3360 struct rte_eth_dev *dev = vsi->adapter->eth_dev; 3361 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); 3362 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 3363 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 3364 uint16_t msix_intr, i; 3365 3366 if (rte_intr_allow_others(intr_handle)) 3367 for (i = 0; i < vsi->nb_used_qps; i++) { 3368 msix_intr = vsi->msix_intr + i; 3369 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), 3370 GLINT_DYN_CTL_INTENA_M | 3371 GLINT_DYN_CTL_CLEARPBA_M | 3372 GLINT_DYN_CTL_ITR_INDX_M | 3373 GLINT_DYN_CTL_WB_ON_ITR_M); 3374 } 3375 else 3376 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), 3377 GLINT_DYN_CTL_INTENA_M | 3378 GLINT_DYN_CTL_CLEARPBA_M | 3379 GLINT_DYN_CTL_ITR_INDX_M | 3380 GLINT_DYN_CTL_WB_ON_ITR_M); 3381 } 3382 3383 static int 3384 ice_rxq_intr_setup(struct rte_eth_dev *dev) 3385 { 3386 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 3387 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); 3388 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 3389 struct ice_vsi *vsi = pf->main_vsi; 3390 uint32_t intr_vector = 0; 3391 3392 rte_intr_disable(intr_handle); 3393 3394 /* check and configure queue intr-vector mapping */ 3395 if ((rte_intr_cap_multiple(intr_handle) || 3396 !RTE_ETH_DEV_SRIOV(dev).active) && 3397 dev->data->dev_conf.intr_conf.rxq != 0) { 3398 intr_vector = dev->data->nb_rx_queues; 3399 if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) { 3400 PMD_DRV_LOG(ERR, "At most %d intr queues supported", 3401 ICE_MAX_INTR_QUEUE_NUM); 3402 return -ENOTSUP; 3403 } 3404 if (rte_intr_efd_enable(intr_handle, intr_vector)) 3405 return -1; 3406 } 3407 3408 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 3409 intr_handle->intr_vec = 3410 rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int), 3411 0); 3412 if (!intr_handle->intr_vec) { 3413 PMD_DRV_LOG(ERR, 3414 "Failed to allocate %d rx_queues intr_vec", 3415 dev->data->nb_rx_queues); 3416 return -ENOMEM; 3417 } 3418 } 3419 3420 /* Map queues with MSIX interrupt */ 3421 vsi->nb_used_qps = dev->data->nb_rx_queues; 3422 ice_vsi_queues_bind_intr(vsi); 3423 3424 /* Enable interrupts for all the queues */ 3425 ice_vsi_enable_queues_intr(vsi); 3426 3427 rte_intr_enable(intr_handle); 3428 3429 return 0; 3430 } 3431 3432 static void 3433 ice_get_init_link_status(struct rte_eth_dev *dev) 3434 { 3435 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3436 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 3437 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? 
			  true : false;
	struct ice_link_status link_status;
	int ret;

	ret = ice_aq_get_link_info(hw->port_info, enable_lse,
				   &link_status, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to get link info");
		pf->init_link_up = false;
		return;
	}

	if (link_status.link_info & ICE_AQ_LINK_UP)
		pf->init_link_up = true;
}

static int
ice_dev_start(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	uint16_t nb_rxq = 0;
	uint16_t nb_txq, i;
	uint16_t max_frame_size;
	int mask, ret;

	/* program Tx queues' context in hardware */
	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
		ret = ice_tx_queue_start(dev, nb_txq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
			goto tx_err;
		}
	}

	/* program Rx queues' context in hardware */
	for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
		ret = ice_rx_queue_start(dev, nb_rxq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
			goto rx_err;
		}
	}

	ice_set_rx_function(dev);
	ice_set_tx_function(dev);

	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
	       ETH_VLAN_EXTEND_MASK;
	ret = ice_vlan_offload_set(dev, mask);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto rx_err;
	}

	/* enable Rx interrupts and map Rx queues to interrupt vectors */
	if (ice_rxq_intr_setup(dev))
		return -EIO;

	/* Enable receiving broadcast packets and transmitting packets */
	ret = ice_set_vsi_promisc(hw, vsi->idx,
				  ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
				  ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
				  0);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(INFO, "fail to set vsi broadcast");

	ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
				    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
				     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
				     ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
				     ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
				     ICE_AQ_LINK_EVENT_AN_COMPLETED |
				     ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
				    NULL);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(WARNING, "Fail to set phy mask");

	ice_get_init_link_status(dev);

	ice_dev_set_link_up(dev);

	/* Call get_link_info aq command to enable/disable LSE */
	ice_link_update(dev, 0);

	pf->adapter_stopped = false;

	/* Set the max frame size to default value */
	max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
3528 pf->dev_data->dev_conf.rxmode.max_rx_pkt_len : 3529 ICE_FRAME_SIZE_MAX; 3530 3531 /* Set the max frame size to HW*/ 3532 ice_aq_set_mac_cfg(hw, max_frame_size, NULL); 3533 3534 return 0; 3535 3536 /* stop the started queues if failed to start all queues */ 3537 rx_err: 3538 for (i = 0; i < nb_rxq; i++) 3539 ice_rx_queue_stop(dev, i); 3540 tx_err: 3541 for (i = 0; i < nb_txq; i++) 3542 ice_tx_queue_stop(dev, i); 3543 3544 return -EIO; 3545 } 3546 3547 static int 3548 ice_dev_reset(struct rte_eth_dev *dev) 3549 { 3550 int ret; 3551 3552 if (dev->data->sriov.active) 3553 return -ENOTSUP; 3554 3555 ret = ice_dev_uninit(dev); 3556 if (ret) { 3557 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret); 3558 return -ENXIO; 3559 } 3560 3561 ret = ice_dev_init(dev); 3562 if (ret) { 3563 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret); 3564 return -ENXIO; 3565 } 3566 3567 return 0; 3568 } 3569 3570 static int 3571 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 3572 { 3573 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 3574 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3575 struct ice_vsi *vsi = pf->main_vsi; 3576 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device); 3577 bool is_safe_mode = pf->adapter->is_safe_mode; 3578 u64 phy_type_low; 3579 u64 phy_type_high; 3580 3581 dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN; 3582 dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX; 3583 dev_info->max_rx_queues = vsi->nb_qps; 3584 dev_info->max_tx_queues = vsi->nb_qps; 3585 dev_info->max_mac_addrs = vsi->max_macaddrs; 3586 dev_info->max_vfs = pci_dev->max_vfs; 3587 dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD; 3588 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 3589 3590 dev_info->rx_offload_capa = 3591 DEV_RX_OFFLOAD_VLAN_STRIP | 3592 DEV_RX_OFFLOAD_JUMBO_FRAME | 3593 DEV_RX_OFFLOAD_KEEP_CRC | 3594 DEV_RX_OFFLOAD_SCATTER | 3595 DEV_RX_OFFLOAD_VLAN_FILTER; 3596 dev_info->tx_offload_capa = 3597 DEV_TX_OFFLOAD_VLAN_INSERT | 3598 DEV_TX_OFFLOAD_TCP_TSO | 3599 DEV_TX_OFFLOAD_MULTI_SEGS | 3600 DEV_TX_OFFLOAD_MBUF_FAST_FREE; 3601 dev_info->flow_type_rss_offloads = 0; 3602 3603 if (!is_safe_mode) { 3604 dev_info->rx_offload_capa |= 3605 DEV_RX_OFFLOAD_IPV4_CKSUM | 3606 DEV_RX_OFFLOAD_UDP_CKSUM | 3607 DEV_RX_OFFLOAD_TCP_CKSUM | 3608 DEV_RX_OFFLOAD_QINQ_STRIP | 3609 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 3610 DEV_RX_OFFLOAD_VLAN_EXTEND | 3611 DEV_RX_OFFLOAD_RSS_HASH; 3612 dev_info->tx_offload_capa |= 3613 DEV_TX_OFFLOAD_QINQ_INSERT | 3614 DEV_TX_OFFLOAD_IPV4_CKSUM | 3615 DEV_TX_OFFLOAD_UDP_CKSUM | 3616 DEV_TX_OFFLOAD_TCP_CKSUM | 3617 DEV_TX_OFFLOAD_SCTP_CKSUM | 3618 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 3619 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM; 3620 dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL; 3621 } 3622 3623 dev_info->rx_queue_offload_capa = 0; 3624 dev_info->tx_queue_offload_capa = 0; 3625 3626 dev_info->reta_size = pf->hash_lut_size; 3627 dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); 3628 3629 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3630 .rx_thresh = { 3631 .pthresh = ICE_DEFAULT_RX_PTHRESH, 3632 .hthresh = ICE_DEFAULT_RX_HTHRESH, 3633 .wthresh = ICE_DEFAULT_RX_WTHRESH, 3634 }, 3635 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH, 3636 .rx_drop_en = 0, 3637 .offloads = 0, 3638 }; 3639 3640 dev_info->default_txconf = (struct rte_eth_txconf) { 3641 .tx_thresh = { 3642 .pthresh = ICE_DEFAULT_TX_PTHRESH, 3643 .hthresh = ICE_DEFAULT_TX_HTHRESH, 3644 .wthresh = 
ICE_DEFAULT_TX_WTHRESH, 3645 }, 3646 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH, 3647 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH, 3648 .offloads = 0, 3649 }; 3650 3651 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { 3652 .nb_max = ICE_MAX_RING_DESC, 3653 .nb_min = ICE_MIN_RING_DESC, 3654 .nb_align = ICE_ALIGN_RING_DESC, 3655 }; 3656 3657 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { 3658 .nb_max = ICE_MAX_RING_DESC, 3659 .nb_min = ICE_MIN_RING_DESC, 3660 .nb_align = ICE_ALIGN_RING_DESC, 3661 }; 3662 3663 dev_info->speed_capa = ETH_LINK_SPEED_10M | 3664 ETH_LINK_SPEED_100M | 3665 ETH_LINK_SPEED_1G | 3666 ETH_LINK_SPEED_2_5G | 3667 ETH_LINK_SPEED_5G | 3668 ETH_LINK_SPEED_10G | 3669 ETH_LINK_SPEED_20G | 3670 ETH_LINK_SPEED_25G; 3671 3672 phy_type_low = hw->port_info->phy.phy_type_low; 3673 phy_type_high = hw->port_info->phy.phy_type_high; 3674 3675 if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low)) 3676 dev_info->speed_capa |= ETH_LINK_SPEED_50G; 3677 3678 if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) || 3679 ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high)) 3680 dev_info->speed_capa |= ETH_LINK_SPEED_100G; 3681 3682 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3683 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3684 3685 dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST; 3686 dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST; 3687 dev_info->default_rxportconf.nb_queues = 1; 3688 dev_info->default_txportconf.nb_queues = 1; 3689 dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN; 3690 dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN; 3691 3692 return 0; 3693 } 3694 3695 static inline int 3696 ice_atomic_read_link_status(struct rte_eth_dev *dev, 3697 struct rte_eth_link *link) 3698 { 3699 struct rte_eth_link *dst = link; 3700 struct rte_eth_link *src = &dev->data->dev_link; 3701 3702 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 3703 *(uint64_t *)src) == 0) 3704 return -1; 3705 3706 return 0; 3707 } 3708 3709 static inline int 3710 ice_atomic_write_link_status(struct rte_eth_dev *dev, 3711 struct rte_eth_link *link) 3712 { 3713 struct rte_eth_link *dst = &dev->data->dev_link; 3714 struct rte_eth_link *src = link; 3715 3716 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 3717 *(uint64_t *)src) == 0) 3718 return -1; 3719 3720 return 0; 3721 } 3722 3723 static int 3724 ice_link_update(struct rte_eth_dev *dev, int wait_to_complete) 3725 { 3726 #define CHECK_INTERVAL 100 /* 100ms */ 3727 #define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ 3728 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3729 struct ice_link_status link_status; 3730 struct rte_eth_link link, old; 3731 int status; 3732 unsigned int rep_cnt = MAX_REPEAT_TIME; 3733 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? 
true : false; 3734 3735 memset(&link, 0, sizeof(link)); 3736 memset(&old, 0, sizeof(old)); 3737 memset(&link_status, 0, sizeof(link_status)); 3738 ice_atomic_read_link_status(dev, &old); 3739 3740 do { 3741 /* Get link status information from hardware */ 3742 status = ice_aq_get_link_info(hw->port_info, enable_lse, 3743 &link_status, NULL); 3744 if (status != ICE_SUCCESS) { 3745 link.link_speed = ETH_SPEED_NUM_100M; 3746 link.link_duplex = ETH_LINK_FULL_DUPLEX; 3747 PMD_DRV_LOG(ERR, "Failed to get link info"); 3748 goto out; 3749 } 3750 3751 link.link_status = link_status.link_info & ICE_AQ_LINK_UP; 3752 if (!wait_to_complete || link.link_status) 3753 break; 3754 3755 rte_delay_ms(CHECK_INTERVAL); 3756 } while (--rep_cnt); 3757 3758 if (!link.link_status) 3759 goto out; 3760 3761 /* Full-duplex operation at all supported speeds */ 3762 link.link_duplex = ETH_LINK_FULL_DUPLEX; 3763 3764 /* Parse the link status */ 3765 switch (link_status.link_speed) { 3766 case ICE_AQ_LINK_SPEED_10MB: 3767 link.link_speed = ETH_SPEED_NUM_10M; 3768 break; 3769 case ICE_AQ_LINK_SPEED_100MB: 3770 link.link_speed = ETH_SPEED_NUM_100M; 3771 break; 3772 case ICE_AQ_LINK_SPEED_1000MB: 3773 link.link_speed = ETH_SPEED_NUM_1G; 3774 break; 3775 case ICE_AQ_LINK_SPEED_2500MB: 3776 link.link_speed = ETH_SPEED_NUM_2_5G; 3777 break; 3778 case ICE_AQ_LINK_SPEED_5GB: 3779 link.link_speed = ETH_SPEED_NUM_5G; 3780 break; 3781 case ICE_AQ_LINK_SPEED_10GB: 3782 link.link_speed = ETH_SPEED_NUM_10G; 3783 break; 3784 case ICE_AQ_LINK_SPEED_20GB: 3785 link.link_speed = ETH_SPEED_NUM_20G; 3786 break; 3787 case ICE_AQ_LINK_SPEED_25GB: 3788 link.link_speed = ETH_SPEED_NUM_25G; 3789 break; 3790 case ICE_AQ_LINK_SPEED_40GB: 3791 link.link_speed = ETH_SPEED_NUM_40G; 3792 break; 3793 case ICE_AQ_LINK_SPEED_50GB: 3794 link.link_speed = ETH_SPEED_NUM_50G; 3795 break; 3796 case ICE_AQ_LINK_SPEED_100GB: 3797 link.link_speed = ETH_SPEED_NUM_100G; 3798 break; 3799 case ICE_AQ_LINK_SPEED_UNKNOWN: 3800 PMD_DRV_LOG(ERR, "Unknown link speed"); 3801 link.link_speed = ETH_SPEED_NUM_UNKNOWN; 3802 break; 3803 default: 3804 PMD_DRV_LOG(ERR, "None link speed"); 3805 link.link_speed = ETH_SPEED_NUM_NONE; 3806 break; 3807 } 3808 3809 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 3810 ETH_LINK_SPEED_FIXED); 3811 3812 out: 3813 ice_atomic_write_link_status(dev, &link); 3814 if (link.link_status == old.link_status) 3815 return -1; 3816 3817 return 0; 3818 } 3819 3820 /* Force the physical link state by getting the current PHY capabilities from 3821 * hardware and setting the PHY config based on the determined capabilities. If 3822 * link changes, link event will be triggered because both the Enable Automatic 3823 * Link Update and LESM Enable bits are set when setting the PHY capabilities. 
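 * Apart from the link-enable bit (and the Enable Automatic Link Update bit
 * noted above), the configuration written back mirrors the capabilities
 * reported by firmware, so speed, FEC and EEE settings are left unchanged.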
3824 */ 3825 static enum ice_status 3826 ice_force_phys_link_state(struct ice_hw *hw, bool link_up) 3827 { 3828 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 3829 struct ice_aqc_get_phy_caps_data *pcaps; 3830 struct ice_port_info *pi; 3831 enum ice_status status; 3832 3833 if (!hw || !hw->port_info) 3834 return ICE_ERR_PARAM; 3835 3836 pi = hw->port_info; 3837 3838 pcaps = (struct ice_aqc_get_phy_caps_data *) 3839 ice_malloc(hw, sizeof(*pcaps)); 3840 if (!pcaps) 3841 return ICE_ERR_NO_MEMORY; 3842 3843 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, 3844 NULL); 3845 if (status) 3846 goto out; 3847 3848 /* No change in link */ 3849 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && 3850 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) 3851 goto out; 3852 3853 cfg.phy_type_low = pcaps->phy_type_low; 3854 cfg.phy_type_high = pcaps->phy_type_high; 3855 cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3856 cfg.low_power_ctrl_an = pcaps->low_power_ctrl_an; 3857 cfg.eee_cap = pcaps->eee_cap; 3858 cfg.eeer_value = pcaps->eeer_value; 3859 cfg.link_fec_opt = pcaps->link_fec_options; 3860 if (link_up) 3861 cfg.caps |= ICE_AQ_PHY_ENA_LINK; 3862 else 3863 cfg.caps &= ~ICE_AQ_PHY_ENA_LINK; 3864 3865 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 3866 3867 out: 3868 ice_free(hw, pcaps); 3869 return status; 3870 } 3871 3872 static int 3873 ice_dev_set_link_up(struct rte_eth_dev *dev) 3874 { 3875 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3876 3877 return ice_force_phys_link_state(hw, true); 3878 } 3879 3880 static int 3881 ice_dev_set_link_down(struct rte_eth_dev *dev) 3882 { 3883 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3884 3885 return ice_force_phys_link_state(hw, false); 3886 } 3887 3888 static int 3889 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 3890 { 3891 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 3892 struct rte_eth_dev_data *dev_data = pf->dev_data; 3893 uint32_t frame_size = mtu + ICE_ETH_OVERHEAD; 3894 3895 /* check if mtu is within the allowed range */ 3896 if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX) 3897 return -EINVAL; 3898 3899 /* mtu setting is forbidden while the port is started */ 3900 if (dev_data->dev_started) { 3901 PMD_DRV_LOG(ERR, 3902 "port %d must be stopped before configuration", 3903 dev_data->port_id); 3904 return -EBUSY; 3905 } 3906 3907 if (frame_size > RTE_ETHER_MAX_LEN) 3908 dev_data->dev_conf.rxmode.offloads |= 3909 DEV_RX_OFFLOAD_JUMBO_FRAME; 3910 else 3911 dev_data->dev_conf.rxmode.offloads &= 3912 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 3913 3914 dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 3915 3916 return 0; 3917 } 3918 3919 static int ice_macaddr_set(struct rte_eth_dev *dev, 3920 struct rte_ether_addr *mac_addr) 3921 { 3922 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3923 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 3924 struct ice_vsi *vsi = pf->main_vsi; 3925 struct ice_mac_filter *f; 3926 uint8_t flags = 0; 3927 int ret; 3928 3929 if (!rte_is_valid_assigned_ether_addr(mac_addr)) { 3930 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address."); 3931 return -EINVAL; 3932 } 3933 3934 TAILQ_FOREACH(f, &vsi->mac_list, next) { 3935 if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr)) 3936 break; 3937 } 3938 3939 if (!f) { 3940 PMD_DRV_LOG(ERR, "Failed to find filter for default mac"); 3941 return -EIO; 3942 } 3943 3944 ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr); 3945
if (ret != ICE_SUCCESS) { 3946 PMD_DRV_LOG(ERR, "Failed to delete mac filter"); 3947 return -EIO; 3948 } 3949 ret = ice_add_mac_filter(vsi, mac_addr); 3950 if (ret != ICE_SUCCESS) { 3951 PMD_DRV_LOG(ERR, "Failed to add mac filter"); 3952 return -EIO; 3953 } 3954 rte_ether_addr_copy(mac_addr, &pf->dev_addr); 3955 3956 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; 3957 ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL); 3958 if (ret != ICE_SUCCESS) 3959 PMD_DRV_LOG(ERR, "Failed to set manage mac"); 3960 3961 return 0; 3962 } 3963 3964 /* Add a MAC address, and update filters */ 3965 static int 3966 ice_macaddr_add(struct rte_eth_dev *dev, 3967 struct rte_ether_addr *mac_addr, 3968 __rte_unused uint32_t index, 3969 __rte_unused uint32_t pool) 3970 { 3971 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 3972 struct ice_vsi *vsi = pf->main_vsi; 3973 int ret; 3974 3975 ret = ice_add_mac_filter(vsi, mac_addr); 3976 if (ret != ICE_SUCCESS) { 3977 PMD_DRV_LOG(ERR, "Failed to add MAC filter"); 3978 return -EINVAL; 3979 } 3980 3981 return ICE_SUCCESS; 3982 } 3983 3984 /* Remove a MAC address, and update filters */ 3985 static void 3986 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index) 3987 { 3988 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 3989 struct ice_vsi *vsi = pf->main_vsi; 3990 struct rte_eth_dev_data *data = dev->data; 3991 struct rte_ether_addr *macaddr; 3992 int ret; 3993 3994 macaddr = &data->mac_addrs[index]; 3995 ret = ice_remove_mac_filter(vsi, macaddr); 3996 if (ret) { 3997 PMD_DRV_LOG(ERR, "Failed to remove MAC filter"); 3998 return; 3999 } 4000 } 4001 4002 static int 4003 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 4004 { 4005 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4006 struct ice_vsi *vsi = pf->main_vsi; 4007 int ret; 4008 4009 PMD_INIT_FUNC_TRACE(); 4010 4011 if (on) { 4012 ret = ice_add_vlan_filter(vsi, vlan_id); 4013 if (ret < 0) { 4014 PMD_DRV_LOG(ERR, "Failed to add vlan filter"); 4015 return -EINVAL; 4016 } 4017 } else { 4018 ret = ice_remove_vlan_filter(vsi, vlan_id); 4019 if (ret < 0) { 4020 PMD_DRV_LOG(ERR, "Failed to remove vlan filter"); 4021 return -EINVAL; 4022 } 4023 } 4024 4025 return 0; 4026 } 4027 4028 /* Configure vlan filter on or off */ 4029 static int 4030 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on) 4031 { 4032 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4033 struct ice_vsi_ctx ctxt; 4034 uint8_t sec_flags, sw_flags2; 4035 int ret = 0; 4036 4037 sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 4038 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S; 4039 sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 4040 4041 if (on) { 4042 vsi->info.sec_flags |= sec_flags; 4043 vsi->info.sw_flags2 |= sw_flags2; 4044 } else { 4045 vsi->info.sec_flags &= ~sec_flags; 4046 vsi->info.sw_flags2 &= ~sw_flags2; 4047 } 4048 vsi->info.sw_id = hw->port_info->sw_id; 4049 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 4050 ctxt.info.valid_sections = 4051 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID | 4052 ICE_AQ_VSI_PROP_SECURITY_VALID); 4053 ctxt.vsi_num = vsi->vsi_id; 4054 4055 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); 4056 if (ret) { 4057 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning", 4058 on ? 
"enable" : "disable"); 4059 return -EINVAL; 4060 } else { 4061 vsi->info.valid_sections |= 4062 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID | 4063 ICE_AQ_VSI_PROP_SECURITY_VALID); 4064 } 4065 4066 /* consist with other drivers, allow untagged packet when vlan filter on */ 4067 if (on) 4068 ret = ice_add_vlan_filter(vsi, 0); 4069 else 4070 ret = ice_remove_vlan_filter(vsi, 0); 4071 4072 return 0; 4073 } 4074 4075 static int 4076 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on) 4077 { 4078 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4079 struct ice_vsi_ctx ctxt; 4080 uint8_t vlan_flags; 4081 int ret = 0; 4082 4083 /* Check if it has been already on or off */ 4084 if (vsi->info.valid_sections & 4085 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) { 4086 if (on) { 4087 if ((vsi->info.vlan_flags & 4088 ICE_AQ_VSI_VLAN_EMOD_M) == 4089 ICE_AQ_VSI_VLAN_EMOD_STR_BOTH) 4090 return 0; /* already on */ 4091 } else { 4092 if ((vsi->info.vlan_flags & 4093 ICE_AQ_VSI_VLAN_EMOD_M) == 4094 ICE_AQ_VSI_VLAN_EMOD_NOTHING) 4095 return 0; /* already off */ 4096 } 4097 } 4098 4099 if (on) 4100 vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; 4101 else 4102 vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; 4103 vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M); 4104 vsi->info.vlan_flags |= vlan_flags; 4105 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 4106 ctxt.info.valid_sections = 4107 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID); 4108 ctxt.vsi_num = vsi->vsi_id; 4109 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); 4110 if (ret) { 4111 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping", 4112 on ? "enable" : "disable"); 4113 return -EINVAL; 4114 } 4115 4116 vsi->info.valid_sections |= 4117 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID); 4118 4119 return ret; 4120 } 4121 4122 static int 4123 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask) 4124 { 4125 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4126 struct ice_vsi *vsi = pf->main_vsi; 4127 struct rte_eth_rxmode *rxmode; 4128 4129 rxmode = &dev->data->dev_conf.rxmode; 4130 if (mask & ETH_VLAN_FILTER_MASK) { 4131 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 4132 ice_vsi_config_vlan_filter(vsi, true); 4133 else 4134 ice_vsi_config_vlan_filter(vsi, false); 4135 } 4136 4137 if (mask & ETH_VLAN_STRIP_MASK) { 4138 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 4139 ice_vsi_config_vlan_stripping(vsi, true); 4140 else 4141 ice_vsi_config_vlan_stripping(vsi, false); 4142 } 4143 4144 if (mask & ETH_VLAN_EXTEND_MASK) { 4145 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 4146 ice_vsi_config_double_vlan(vsi, true); 4147 else 4148 ice_vsi_config_double_vlan(vsi, false); 4149 } 4150 4151 return 0; 4152 } 4153 4154 static int 4155 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size) 4156 { 4157 struct ice_aq_get_set_rss_lut_params lut_params; 4158 struct ice_pf *pf = ICE_VSI_TO_PF(vsi); 4159 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4160 int ret; 4161 4162 if (!lut) 4163 return -EINVAL; 4164 4165 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) { 4166 lut_params.vsi_handle = vsi->idx; 4167 lut_params.lut_size = lut_size; 4168 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; 4169 lut_params.lut = lut; 4170 lut_params.global_lut_id = 0; 4171 ret = ice_aq_get_rss_lut(hw, &lut_params); 4172 if (ret) { 4173 PMD_DRV_LOG(ERR, "Failed to get RSS lookup table"); 4174 return -EINVAL; 4175 } 4176 } else { 4177 uint64_t *lut_dw = (uint64_t *)lut; 4178 uint16_t i, lut_size_dw = lut_size / 4; 4179 4180 for (i = 0; i < 
lut_size_dw; i++) 4181 lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i)); 4182 } 4183 4184 return 0; 4185 } 4186 4187 static int 4188 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size) 4189 { 4190 struct ice_aq_get_set_rss_lut_params lut_params; 4191 struct ice_pf *pf; 4192 struct ice_hw *hw; 4193 int ret; 4194 4195 if (!vsi || !lut) 4196 return -EINVAL; 4197 4198 pf = ICE_VSI_TO_PF(vsi); 4199 hw = ICE_VSI_TO_HW(vsi); 4200 4201 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) { 4202 lut_params.vsi_handle = vsi->idx; 4203 lut_params.lut_size = lut_size; 4204 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; 4205 lut_params.lut = lut; 4206 lut_params.global_lut_id = 0; 4207 ret = ice_aq_set_rss_lut(hw, &lut_params); 4208 if (ret) { 4209 PMD_DRV_LOG(ERR, "Failed to set RSS lookup table"); 4210 return -EINVAL; 4211 } 4212 } else { 4213 uint64_t *lut_dw = (uint64_t *)lut; 4214 uint16_t i, lut_size_dw = lut_size / 4; 4215 4216 for (i = 0; i < lut_size_dw; i++) 4217 ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]); 4218 4219 ice_flush(hw); 4220 } 4221 4222 return 0; 4223 } 4224 4225 static int 4226 ice_rss_reta_update(struct rte_eth_dev *dev, 4227 struct rte_eth_rss_reta_entry64 *reta_conf, 4228 uint16_t reta_size) 4229 { 4230 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4231 uint16_t i, lut_size = pf->hash_lut_size; 4232 uint16_t idx, shift; 4233 uint8_t *lut; 4234 int ret; 4235 4236 if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 && 4237 reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 && 4238 reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) { 4239 PMD_DRV_LOG(ERR, 4240 "The size of hash lookup table configured (%d)" 4241 "doesn't match the number hardware can " 4242 "supported (128, 512, 2048)", 4243 reta_size); 4244 return -EINVAL; 4245 } 4246 4247 /* It MUST use the current LUT size to get the RSS lookup table, 4248 * otherwise it will fail with -100 error code.
4249 */ 4250 lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0); 4251 if (!lut) { 4252 PMD_DRV_LOG(ERR, "No memory can be allocated"); 4253 return -ENOMEM; 4254 } 4255 ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size); 4256 if (ret) 4257 goto out; 4258 4259 for (i = 0; i < reta_size; i++) { 4260 idx = i / RTE_RETA_GROUP_SIZE; 4261 shift = i % RTE_RETA_GROUP_SIZE; 4262 if (reta_conf[idx].mask & (1ULL << shift)) 4263 lut[i] = reta_conf[idx].reta[shift]; 4264 } 4265 ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size); 4266 if (ret == 0 && lut_size != reta_size) { 4267 PMD_DRV_LOG(INFO, 4268 "The size of hash lookup table is changed from (%d) to (%d)", 4269 lut_size, reta_size); 4270 pf->hash_lut_size = reta_size; 4271 } 4272 4273 out: 4274 rte_free(lut); 4275 4276 return ret; 4277 } 4278 4279 static int 4280 ice_rss_reta_query(struct rte_eth_dev *dev, 4281 struct rte_eth_rss_reta_entry64 *reta_conf, 4282 uint16_t reta_size) 4283 { 4284 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4285 uint16_t i, lut_size = pf->hash_lut_size; 4286 uint16_t idx, shift; 4287 uint8_t *lut; 4288 int ret; 4289 4290 if (reta_size != lut_size) { 4291 PMD_DRV_LOG(ERR, 4292 "The size of hash lookup table configured (%d)" 4293 "doesn't match the number hardware can " 4294 "supported (%d)", 4295 reta_size, lut_size); 4296 return -EINVAL; 4297 } 4298 4299 lut = rte_zmalloc(NULL, reta_size, 0); 4300 if (!lut) { 4301 PMD_DRV_LOG(ERR, "No memory can be allocated"); 4302 return -ENOMEM; 4303 } 4304 4305 ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size); 4306 if (ret) 4307 goto out; 4308 4309 for (i = 0; i < reta_size; i++) { 4310 idx = i / RTE_RETA_GROUP_SIZE; 4311 shift = i % RTE_RETA_GROUP_SIZE; 4312 if (reta_conf[idx].mask & (1ULL << shift)) 4313 reta_conf[idx].reta[shift] = lut[i]; 4314 } 4315 4316 out: 4317 rte_free(lut); 4318 4319 return ret; 4320 } 4321 4322 static int 4323 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len) 4324 { 4325 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4326 int ret = 0; 4327 4328 if (!key || key_len == 0) { 4329 PMD_DRV_LOG(DEBUG, "No key to be configured"); 4330 return 0; 4331 } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) * 4332 sizeof(uint32_t)) { 4333 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len); 4334 return -EINVAL; 4335 } 4336 4337 struct ice_aqc_get_set_rss_keys *key_dw = 4338 (struct ice_aqc_get_set_rss_keys *)key; 4339 4340 ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw); 4341 if (ret) { 4342 PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ"); 4343 ret = -EINVAL; 4344 } 4345 4346 return ret; 4347 } 4348 4349 static int 4350 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len) 4351 { 4352 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4353 int ret; 4354 4355 if (!key || !key_len) 4356 return -EINVAL; 4357 4358 ret = ice_aq_get_rss_key 4359 (hw, vsi->idx, 4360 (struct ice_aqc_get_set_rss_keys *)key); 4361 if (ret) { 4362 PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ"); 4363 return -EINVAL; 4364 } 4365 *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); 4366 4367 return 0; 4368 } 4369 4370 static int 4371 ice_rss_hash_update(struct rte_eth_dev *dev, 4372 struct rte_eth_rss_conf *rss_conf) 4373 { 4374 enum ice_status status = ICE_SUCCESS; 4375 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4376 struct ice_vsi *vsi = pf->main_vsi; 4377 4378 /* set hash key */ 4379 status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len); 4380 if (status) 4381 return status; 4382 4383 if 
(rss_conf->rss_hf == 0) 4384 return 0; 4385 4386 /* RSS hash configuration */ 4387 ice_rss_hash_set(pf, rss_conf->rss_hf); 4388 4389 return 0; 4390 } 4391 4392 static int 4393 ice_rss_hash_conf_get(struct rte_eth_dev *dev, 4394 struct rte_eth_rss_conf *rss_conf) 4395 { 4396 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4397 struct ice_vsi *vsi = pf->main_vsi; 4398 4399 ice_get_rss_key(vsi, rss_conf->rss_key, 4400 &rss_conf->rss_key_len); 4401 4402 rss_conf->rss_hf = pf->rss_hf; 4403 return 0; 4404 } 4405 4406 static int 4407 ice_promisc_enable(struct rte_eth_dev *dev) 4408 { 4409 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4410 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4411 struct ice_vsi *vsi = pf->main_vsi; 4412 enum ice_status status; 4413 uint8_t pmask; 4414 int ret = 0; 4415 4416 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX | 4417 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX; 4418 4419 status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0); 4420 switch (status) { 4421 case ICE_ERR_ALREADY_EXISTS: 4422 PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled"); 4423 case ICE_SUCCESS: 4424 break; 4425 default: 4426 PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status); 4427 ret = -EAGAIN; 4428 } 4429 4430 return ret; 4431 } 4432 4433 static int 4434 ice_promisc_disable(struct rte_eth_dev *dev) 4435 { 4436 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4437 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4438 struct ice_vsi *vsi = pf->main_vsi; 4439 enum ice_status status; 4440 uint8_t pmask; 4441 int ret = 0; 4442 4443 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX | 4444 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX; 4445 4446 status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0); 4447 if (status != ICE_SUCCESS) { 4448 PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status); 4449 ret = -EAGAIN; 4450 } 4451 4452 return ret; 4453 } 4454 4455 static int 4456 ice_allmulti_enable(struct rte_eth_dev *dev) 4457 { 4458 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4459 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4460 struct ice_vsi *vsi = pf->main_vsi; 4461 enum ice_status status; 4462 uint8_t pmask; 4463 int ret = 0; 4464 4465 pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX; 4466 4467 status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0); 4468 4469 switch (status) { 4470 case ICE_ERR_ALREADY_EXISTS: 4471 PMD_DRV_LOG(DEBUG, "Allmulti has already been enabled"); 4472 case ICE_SUCCESS: 4473 break; 4474 default: 4475 PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status); 4476 ret = -EAGAIN; 4477 } 4478 4479 return ret; 4480 } 4481 4482 static int 4483 ice_allmulti_disable(struct rte_eth_dev *dev) 4484 { 4485 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4486 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4487 struct ice_vsi *vsi = pf->main_vsi; 4488 enum ice_status status; 4489 uint8_t pmask; 4490 int ret = 0; 4491 4492 if (dev->data->promiscuous == 1) 4493 return 0; /* must remain in all_multicast mode */ 4494 4495 pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX; 4496 4497 status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0); 4498 if (status != ICE_SUCCESS) { 4499 PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status); 4500 ret = -EAGAIN; 4501 } 4502 4503 return ret; 4504 } 4505 4506 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev, 4507 uint16_t 
queue_id) 4508 { 4509 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); 4510 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 4511 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4512 uint32_t val; 4513 uint16_t msix_intr; 4514 4515 msix_intr = intr_handle->intr_vec[queue_id]; 4516 4517 val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | 4518 GLINT_DYN_CTL_ITR_INDX_M; 4519 val &= ~GLINT_DYN_CTL_WB_ON_ITR_M; 4520 4521 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val); 4522 rte_intr_ack(&pci_dev->intr_handle); 4523 4524 return 0; 4525 } 4526 4527 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev, 4528 uint16_t queue_id) 4529 { 4530 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); 4531 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 4532 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4533 uint16_t msix_intr; 4534 4535 msix_intr = intr_handle->intr_vec[queue_id]; 4536 4537 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M); 4538 4539 return 0; 4540 } 4541 4542 static int 4543 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 4544 { 4545 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4546 u8 ver, patch; 4547 u16 build; 4548 int ret; 4549 4550 ver = hw->flash.orom.major; 4551 patch = hw->flash.orom.patch; 4552 build = hw->flash.orom.build; 4553 4554 ret = snprintf(fw_version, fw_size, 4555 "%x.%02x 0x%08x %d.%d.%d", 4556 hw->flash.nvm.major, 4557 hw->flash.nvm.minor, 4558 hw->flash.nvm.eetrack, 4559 ver, build, patch); 4560 4561 /* add the size of '\0' */ 4562 ret += 1; 4563 if (fw_size < (u32)ret) 4564 return ret; 4565 else 4566 return 0; 4567 } 4568 4569 static int 4570 ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info) 4571 { 4572 struct ice_hw *hw; 4573 struct ice_vsi_ctx ctxt; 4574 uint8_t vlan_flags = 0; 4575 int ret; 4576 4577 if (!vsi || !info) { 4578 PMD_DRV_LOG(ERR, "invalid parameters"); 4579 return -EINVAL; 4580 } 4581 4582 if (info->on) { 4583 vsi->info.pvid = info->config.pvid; 4584 /** 4585 * If insert pvid is enabled, only tagged pkts are 4586 * allowed to be sent out. 
4587 */ 4588 vlan_flags = ICE_AQ_VSI_PVLAN_INSERT_PVID | 4589 ICE_AQ_VSI_VLAN_MODE_UNTAGGED; 4590 } else { 4591 vsi->info.pvid = 0; 4592 if (info->config.reject.tagged == 0) 4593 vlan_flags |= ICE_AQ_VSI_VLAN_MODE_TAGGED; 4594 4595 if (info->config.reject.untagged == 0) 4596 vlan_flags |= ICE_AQ_VSI_VLAN_MODE_UNTAGGED; 4597 } 4598 vsi->info.vlan_flags &= ~(ICE_AQ_VSI_PVLAN_INSERT_PVID | 4599 ICE_AQ_VSI_VLAN_MODE_M); 4600 vsi->info.vlan_flags |= vlan_flags; 4601 memset(&ctxt, 0, sizeof(ctxt)); 4602 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 4603 ctxt.info.valid_sections = 4604 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID); 4605 ctxt.vsi_num = vsi->vsi_id; 4606 4607 hw = ICE_VSI_TO_HW(vsi); 4608 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); 4609 if (ret != ICE_SUCCESS) { 4610 PMD_DRV_LOG(ERR, 4611 "update VSI for VLAN insert failed, err %d", 4612 ret); 4613 return -EINVAL; 4614 } 4615 4616 vsi->info.valid_sections |= 4617 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID); 4618 4619 return ret; 4620 } 4621 4622 static int 4623 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on) 4624 { 4625 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4626 struct ice_vsi *vsi = pf->main_vsi; 4627 struct rte_eth_dev_data *data = pf->dev_data; 4628 struct ice_vsi_vlan_pvid_info info; 4629 int ret; 4630 4631 memset(&info, 0, sizeof(info)); 4632 info.on = on; 4633 if (info.on) { 4634 info.config.pvid = pvid; 4635 } else { 4636 info.config.reject.tagged = 4637 data->dev_conf.txmode.hw_vlan_reject_tagged; 4638 info.config.reject.untagged = 4639 data->dev_conf.txmode.hw_vlan_reject_untagged; 4640 } 4641 4642 ret = ice_vsi_vlan_pvid_set(vsi, &info); 4643 if (ret < 0) { 4644 PMD_DRV_LOG(ERR, "Failed to set pvid."); 4645 return -EINVAL; 4646 } 4647 4648 return 0; 4649 } 4650 4651 static int 4652 ice_get_eeprom_length(struct rte_eth_dev *dev) 4653 { 4654 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4655 4656 return hw->flash.flash_size; 4657 } 4658 4659 static int 4660 ice_get_eeprom(struct rte_eth_dev *dev, 4661 struct rte_dev_eeprom_info *eeprom) 4662 { 4663 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4664 enum ice_status status = ICE_SUCCESS; 4665 uint8_t *data = eeprom->data; 4666 4667 eeprom->magic = hw->vendor_id | (hw->device_id << 16); 4668 4669 status = ice_acquire_nvm(hw, ICE_RES_READ); 4670 if (status) { 4671 PMD_DRV_LOG(ERR, "acquire nvm failed."); 4672 return -EIO; 4673 } 4674 4675 status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->length, 4676 data, false); 4677 4678 ice_release_nvm(hw); 4679 4680 if (status) { 4681 PMD_DRV_LOG(ERR, "EEPROM read failed."); 4682 return -EIO; 4683 } 4684 4685 return 0; 4686 } 4687 4688 static void 4689 ice_stat_update_32(struct ice_hw *hw, 4690 uint32_t reg, 4691 bool offset_loaded, 4692 uint64_t *offset, 4693 uint64_t *stat) 4694 { 4695 uint64_t new_data; 4696 4697 new_data = (uint64_t)ICE_READ_REG(hw, reg); 4698 if (!offset_loaded) 4699 *offset = new_data; 4700 4701 if (new_data >= *offset) 4702 *stat = (uint64_t)(new_data - *offset); 4703 else 4704 *stat = (uint64_t)((new_data + 4705 ((uint64_t)1 << ICE_32_BIT_WIDTH)) 4706 - *offset); 4707 } 4708 4709 static void 4710 ice_stat_update_40(struct ice_hw *hw, 4711 uint32_t hireg, 4712 uint32_t loreg, 4713 bool offset_loaded, 4714 uint64_t *offset, 4715 uint64_t *stat) 4716 { 4717 uint64_t new_data; 4718 4719 new_data = (uint64_t)ICE_READ_REG(hw, loreg); 4720 new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) << 4721 
ICE_32_BIT_WIDTH; 4722 4723 if (!offset_loaded) 4724 *offset = new_data; 4725 4726 if (new_data >= *offset) 4727 *stat = new_data - *offset; 4728 else 4729 *stat = (uint64_t)((new_data + 4730 ((uint64_t)1 << ICE_40_BIT_WIDTH)) - 4731 *offset); 4732 4733 *stat &= ICE_40_BIT_MASK; 4734 } 4735 4736 /* Get all the statistics of a VSI */ 4737 static void 4738 ice_update_vsi_stats(struct ice_vsi *vsi) 4739 { 4740 struct ice_eth_stats *oes = &vsi->eth_stats_offset; 4741 struct ice_eth_stats *nes = &vsi->eth_stats; 4742 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4743 int idx = rte_le_to_cpu_16(vsi->vsi_id); 4744 4745 ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx), 4746 vsi->offset_loaded, &oes->rx_bytes, 4747 &nes->rx_bytes); 4748 ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx), 4749 vsi->offset_loaded, &oes->rx_unicast, 4750 &nes->rx_unicast); 4751 ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx), 4752 vsi->offset_loaded, &oes->rx_multicast, 4753 &nes->rx_multicast); 4754 ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx), 4755 vsi->offset_loaded, &oes->rx_broadcast, 4756 &nes->rx_broadcast); 4757 /* enlarge the limitation when rx_bytes overflowed */ 4758 if (vsi->offset_loaded) { 4759 if (ICE_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes) 4760 nes->rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH; 4761 nes->rx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_rx_bytes); 4762 } 4763 vsi->old_rx_bytes = nes->rx_bytes; 4764 /* exclude CRC bytes */ 4765 nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast + 4766 nes->rx_broadcast) * RTE_ETHER_CRC_LEN; 4767 4768 ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded, 4769 &oes->rx_discards, &nes->rx_discards); 4770 /* GLV_REPC not supported */ 4771 /* GLV_RMPC not supported */ 4772 ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded, 4773 &oes->rx_unknown_protocol, 4774 &nes->rx_unknown_protocol); 4775 ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx), 4776 vsi->offset_loaded, &oes->tx_bytes, 4777 &nes->tx_bytes); 4778 ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx), 4779 vsi->offset_loaded, &oes->tx_unicast, 4780 &nes->tx_unicast); 4781 ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx), 4782 vsi->offset_loaded, &oes->tx_multicast, 4783 &nes->tx_multicast); 4784 ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx), 4785 vsi->offset_loaded, &oes->tx_broadcast, 4786 &nes->tx_broadcast); 4787 /* GLV_TDPC not supported */ 4788 ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded, 4789 &oes->tx_errors, &nes->tx_errors); 4790 /* enlarge the limitation when tx_bytes overflowed */ 4791 if (vsi->offset_loaded) { 4792 if (ICE_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes) 4793 nes->tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH; 4794 nes->tx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_tx_bytes); 4795 } 4796 vsi->old_tx_bytes = nes->tx_bytes; 4797 vsi->offset_loaded = true; 4798 4799 PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************", 4800 vsi->vsi_id); 4801 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes); 4802 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast); 4803 PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast); 4804 PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast); 4805 PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards); 4806 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"", 4807 nes->rx_unknown_protocol); 4808 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes); 4809 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast); 4810 
PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast); 4811 PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast); 4812 PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards); 4813 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors); 4814 PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************", 4815 vsi->vsi_id); 4816 } 4817 4818 static void 4819 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw) 4820 { 4821 struct ice_hw_port_stats *ns = &pf->stats; /* new stats */ 4822 struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */ 4823 4824 /* Get statistics of struct ice_eth_stats */ 4825 ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport), 4826 GLPRT_GORCL(hw->port_info->lport), 4827 pf->offset_loaded, &os->eth.rx_bytes, 4828 &ns->eth.rx_bytes); 4829 ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport), 4830 GLPRT_UPRCL(hw->port_info->lport), 4831 pf->offset_loaded, &os->eth.rx_unicast, 4832 &ns->eth.rx_unicast); 4833 ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport), 4834 GLPRT_MPRCL(hw->port_info->lport), 4835 pf->offset_loaded, &os->eth.rx_multicast, 4836 &ns->eth.rx_multicast); 4837 ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport), 4838 GLPRT_BPRCL(hw->port_info->lport), 4839 pf->offset_loaded, &os->eth.rx_broadcast, 4840 &ns->eth.rx_broadcast); 4841 ice_stat_update_32(hw, PRTRPB_RDPC, 4842 pf->offset_loaded, &os->eth.rx_discards, 4843 &ns->eth.rx_discards); 4844 /* enlarge the limitation when rx_bytes overflowed */ 4845 if (pf->offset_loaded) { 4846 if (ICE_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes) 4847 ns->eth.rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH; 4848 ns->eth.rx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_rx_bytes); 4849 } 4850 pf->old_rx_bytes = ns->eth.rx_bytes; 4851 4852 /* Workaround: CRC size should not be included in byte statistics, 4853 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx 4854 * packet. 
4855 */ 4856 ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast + 4857 ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN; 4858 4859 /* GLPRT_REPC not supported */ 4860 /* GLPRT_RMPC not supported */ 4861 ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport), 4862 pf->offset_loaded, 4863 &os->eth.rx_unknown_protocol, 4864 &ns->eth.rx_unknown_protocol); 4865 ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport), 4866 GLPRT_GOTCL(hw->port_info->lport), 4867 pf->offset_loaded, &os->eth.tx_bytes, 4868 &ns->eth.tx_bytes); 4869 ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport), 4870 GLPRT_UPTCL(hw->port_info->lport), 4871 pf->offset_loaded, &os->eth.tx_unicast, 4872 &ns->eth.tx_unicast); 4873 ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport), 4874 GLPRT_MPTCL(hw->port_info->lport), 4875 pf->offset_loaded, &os->eth.tx_multicast, 4876 &ns->eth.tx_multicast); 4877 ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport), 4878 GLPRT_BPTCL(hw->port_info->lport), 4879 pf->offset_loaded, &os->eth.tx_broadcast, 4880 &ns->eth.tx_broadcast); 4881 /* enlarge the limitation when tx_bytes overflowed */ 4882 if (pf->offset_loaded) { 4883 if (ICE_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes) 4884 ns->eth.tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH; 4885 ns->eth.tx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_tx_bytes); 4886 } 4887 pf->old_tx_bytes = ns->eth.tx_bytes; 4888 ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast + 4889 ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN; 4890 4891 /* GLPRT_TEPC not supported */ 4892 4893 /* additional port specific stats */ 4894 ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport), 4895 pf->offset_loaded, &os->tx_dropped_link_down, 4896 &ns->tx_dropped_link_down); 4897 ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport), 4898 pf->offset_loaded, &os->crc_errors, 4899 &ns->crc_errors); 4900 ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport), 4901 pf->offset_loaded, &os->illegal_bytes, 4902 &ns->illegal_bytes); 4903 /* GLPRT_ERRBC not supported */ 4904 ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport), 4905 pf->offset_loaded, &os->mac_local_faults, 4906 &ns->mac_local_faults); 4907 ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport), 4908 pf->offset_loaded, &os->mac_remote_faults, 4909 &ns->mac_remote_faults); 4910 4911 ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport), 4912 pf->offset_loaded, &os->rx_len_errors, 4913 &ns->rx_len_errors); 4914 4915 ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport), 4916 pf->offset_loaded, &os->link_xon_rx, 4917 &ns->link_xon_rx); 4918 ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport), 4919 pf->offset_loaded, &os->link_xoff_rx, 4920 &ns->link_xoff_rx); 4921 ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport), 4922 pf->offset_loaded, &os->link_xon_tx, 4923 &ns->link_xon_tx); 4924 ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport), 4925 pf->offset_loaded, &os->link_xoff_tx, 4926 &ns->link_xoff_tx); 4927 ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport), 4928 GLPRT_PRC64L(hw->port_info->lport), 4929 pf->offset_loaded, &os->rx_size_64, 4930 &ns->rx_size_64); 4931 ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport), 4932 GLPRT_PRC127L(hw->port_info->lport), 4933 pf->offset_loaded, &os->rx_size_127, 4934 &ns->rx_size_127); 4935 ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport), 4936 GLPRT_PRC255L(hw->port_info->lport), 4937 pf->offset_loaded, &os->rx_size_255, 4938 &ns->rx_size_255); 4939 ice_stat_update_40(hw, 
GLPRT_PRC511H(hw->port_info->lport), 4940 GLPRT_PRC511L(hw->port_info->lport), 4941 pf->offset_loaded, &os->rx_size_511, 4942 &ns->rx_size_511); 4943 ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport), 4944 GLPRT_PRC1023L(hw->port_info->lport), 4945 pf->offset_loaded, &os->rx_size_1023, 4946 &ns->rx_size_1023); 4947 ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport), 4948 GLPRT_PRC1522L(hw->port_info->lport), 4949 pf->offset_loaded, &os->rx_size_1522, 4950 &ns->rx_size_1522); 4951 ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport), 4952 GLPRT_PRC9522L(hw->port_info->lport), 4953 pf->offset_loaded, &os->rx_size_big, 4954 &ns->rx_size_big); 4955 ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport), 4956 pf->offset_loaded, &os->rx_undersize, 4957 &ns->rx_undersize); 4958 ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport), 4959 pf->offset_loaded, &os->rx_fragments, 4960 &ns->rx_fragments); 4961 ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport), 4962 pf->offset_loaded, &os->rx_oversize, 4963 &ns->rx_oversize); 4964 ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport), 4965 pf->offset_loaded, &os->rx_jabber, 4966 &ns->rx_jabber); 4967 ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport), 4968 GLPRT_PTC64L(hw->port_info->lport), 4969 pf->offset_loaded, &os->tx_size_64, 4970 &ns->tx_size_64); 4971 ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport), 4972 GLPRT_PTC127L(hw->port_info->lport), 4973 pf->offset_loaded, &os->tx_size_127, 4974 &ns->tx_size_127); 4975 ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport), 4976 GLPRT_PTC255L(hw->port_info->lport), 4977 pf->offset_loaded, &os->tx_size_255, 4978 &ns->tx_size_255); 4979 ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport), 4980 GLPRT_PTC511L(hw->port_info->lport), 4981 pf->offset_loaded, &os->tx_size_511, 4982 &ns->tx_size_511); 4983 ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport), 4984 GLPRT_PTC1023L(hw->port_info->lport), 4985 pf->offset_loaded, &os->tx_size_1023, 4986 &ns->tx_size_1023); 4987 ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport), 4988 GLPRT_PTC1522L(hw->port_info->lport), 4989 pf->offset_loaded, &os->tx_size_1522, 4990 &ns->tx_size_1522); 4991 ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport), 4992 GLPRT_PTC9522L(hw->port_info->lport), 4993 pf->offset_loaded, &os->tx_size_big, 4994 &ns->tx_size_big); 4995 4996 /* GLPRT_MSPDC not supported */ 4997 /* GLPRT_XEC not supported */ 4998 4999 pf->offset_loaded = true; 5000 5001 if (pf->main_vsi) 5002 ice_update_vsi_stats(pf->main_vsi); 5003 } 5004 5005 /* Get all statistics of a port */ 5006 static int 5007 ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 5008 { 5009 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5010 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5011 struct ice_hw_port_stats *ns = &pf->stats; /* new stats */ 5012 5013 /* call read registers - updates values, now write them to struct */ 5014 ice_read_stats_registers(pf, hw); 5015 5016 stats->ipackets = pf->main_vsi->eth_stats.rx_unicast + 5017 pf->main_vsi->eth_stats.rx_multicast + 5018 pf->main_vsi->eth_stats.rx_broadcast - 5019 pf->main_vsi->eth_stats.rx_discards; 5020 stats->opackets = ns->eth.tx_unicast + 5021 ns->eth.tx_multicast + 5022 ns->eth.tx_broadcast; 5023 stats->ibytes = pf->main_vsi->eth_stats.rx_bytes; 5024 stats->obytes = ns->eth.tx_bytes; 5025 stats->oerrors = ns->eth.tx_errors + 5026 pf->main_vsi->eth_stats.tx_errors; 5027 5028 /* Rx Errors */ 
5029 stats->imissed = ns->eth.rx_discards + 5030 pf->main_vsi->eth_stats.rx_discards; 5031 stats->ierrors = ns->crc_errors + 5032 ns->rx_undersize + 5033 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber; 5034 5035 PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************"); 5036 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes); 5037 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast); 5038 PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast); 5039 PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast); 5040 PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards); 5041 PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"", 5042 pf->main_vsi->eth_stats.rx_discards); 5043 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"", 5044 ns->eth.rx_unknown_protocol); 5045 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes); 5046 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast); 5047 PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast); 5048 PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast); 5049 PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards); 5050 PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"", 5051 pf->main_vsi->eth_stats.tx_discards); 5052 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors); 5053 5054 PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"", 5055 ns->tx_dropped_link_down); 5056 PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors); 5057 PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"", 5058 ns->illegal_bytes); 5059 PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes); 5060 PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"", 5061 ns->mac_local_faults); 5062 PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"", 5063 ns->mac_remote_faults); 5064 PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx); 5065 PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx); 5066 PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx); 5067 PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx); 5068 PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64); 5069 PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127); 5070 PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255); 5071 PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511); 5072 PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023); 5073 PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522); 5074 PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big); 5075 PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize); 5076 PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments); 5077 PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize); 5078 PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber); 5079 PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64); 5080 PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127); 5081 PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255); 5082 PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511); 5083 PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023); 5084 PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522); 5085 PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big); 5086 PMD_DRV_LOG(DEBUG, "rx_len_errors: %"PRIu64"", ns->rx_len_errors); 5087 PMD_DRV_LOG(DEBUG, "************* PF stats end ****************"); 5088 return 0; 5089 } 5090 5091 /* Reset the statistics */ 5092 static int 5093 
ice_stats_reset(struct rte_eth_dev *dev) 5094 { 5095 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5096 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5097 5098 /* Mark PF and VSI stats to update the offset, aka "reset" */ 5099 pf->offset_loaded = false; 5100 if (pf->main_vsi) 5101 pf->main_vsi->offset_loaded = false; 5102 5103 /* read the stats, reading current register values into offset */ 5104 ice_read_stats_registers(pf, hw); 5105 5106 return 0; 5107 } 5108 5109 static uint32_t 5110 ice_xstats_calc_num(void) 5111 { 5112 uint32_t num; 5113 5114 num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS; 5115 5116 return num; 5117 } 5118 5119 static int 5120 ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 5121 unsigned int n) 5122 { 5123 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5124 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5125 unsigned int i; 5126 unsigned int count; 5127 struct ice_hw_port_stats *hw_stats = &pf->stats; 5128 5129 count = ice_xstats_calc_num(); 5130 if (n < count) 5131 return count; 5132 5133 ice_read_stats_registers(pf, hw); 5134 5135 if (!xstats) 5136 return 0; 5137 5138 count = 0; 5139 5140 /* Get stats from ice_eth_stats struct */ 5141 for (i = 0; i < ICE_NB_ETH_XSTATS; i++) { 5142 xstats[count].value = 5143 *(uint64_t *)((char *)&hw_stats->eth + 5144 ice_stats_strings[i].offset); 5145 xstats[count].id = count; 5146 count++; 5147 } 5148 5149 /* Get individual stats from ice_hw_port struct */ 5150 for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) { 5151 xstats[count].value = 5152 *(uint64_t *)((char *)hw_stats + 5153 ice_hw_port_strings[i].offset); 5154 xstats[count].id = count; 5155 count++; 5156 } 5157 5158 return count; 5159 } 5160 5161 static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 5162 struct rte_eth_xstat_name *xstats_names, 5163 __rte_unused unsigned int limit) 5164 { 5165 unsigned int count = 0; 5166 unsigned int i; 5167 5168 if (!xstats_names) 5169 return ice_xstats_calc_num(); 5170 5171 /* Note: limit checked in rte_eth_xstats_get_names() */ 5172 5173 /* Get stats from ice_eth_stats struct */ 5174 for (i = 0; i < ICE_NB_ETH_XSTATS; i++) { 5175 strlcpy(xstats_names[count].name, ice_stats_strings[i].name, 5176 sizeof(xstats_names[count].name)); 5177 count++; 5178 } 5179 5180 /* Get individual stats from ice_hw_port struct */ 5181 for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) { 5182 strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name, 5183 sizeof(xstats_names[count].name)); 5184 count++; 5185 } 5186 5187 return count; 5188 } 5189 5190 static int 5191 ice_dev_filter_ctrl(struct rte_eth_dev *dev, 5192 enum rte_filter_type filter_type, 5193 enum rte_filter_op filter_op, 5194 void *arg) 5195 { 5196 int ret = 0; 5197 5198 if (!dev) 5199 return -EINVAL; 5200 5201 switch (filter_type) { 5202 case RTE_ETH_FILTER_GENERIC: 5203 if (filter_op != RTE_ETH_FILTER_GET) 5204 return -EINVAL; 5205 *(const void **)arg = &ice_flow_ops; 5206 break; 5207 default: 5208 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", 5209 filter_type); 5210 ret = -EINVAL; 5211 break; 5212 } 5213 5214 return ret; 5215 } 5216 5217 /* Add UDP tunneling port */ 5218 static int 5219 ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 5220 struct rte_eth_udp_tunnel *udp_tunnel) 5221 { 5222 int ret = 0; 5223 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5224 5225 if (udp_tunnel == NULL) 5226 return -EINVAL; 5227 5228 switch (udp_tunnel->prot_type) { 5229
case RTE_TUNNEL_TYPE_VXLAN: 5230 ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port); 5231 break; 5232 default: 5233 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 5234 ret = -EINVAL; 5235 break; 5236 } 5237 5238 return ret; 5239 } 5240 5241 /* Delete UDP tunneling port */ 5242 static int 5243 ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 5244 struct rte_eth_udp_tunnel *udp_tunnel) 5245 { 5246 int ret = 0; 5247 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5248 5249 if (udp_tunnel == NULL) 5250 return -EINVAL; 5251 5252 switch (udp_tunnel->prot_type) { 5253 case RTE_TUNNEL_TYPE_VXLAN: 5254 ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0); 5255 break; 5256 default: 5257 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 5258 ret = -EINVAL; 5259 break; 5260 } 5261 5262 return ret; 5263 } 5264 5265 static int 5266 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 5267 struct rte_pci_device *pci_dev) 5268 { 5269 return rte_eth_dev_pci_generic_probe(pci_dev, 5270 sizeof(struct ice_adapter), 5271 ice_dev_init); 5272 } 5273 5274 static int 5275 ice_pci_remove(struct rte_pci_device *pci_dev) 5276 { 5277 return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit); 5278 } 5279 5280 static struct rte_pci_driver rte_ice_pmd = { 5281 .id_table = pci_id_ice_map, 5282 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 5283 .probe = ice_pci_probe, 5284 .remove = ice_pci_remove, 5285 }; 5286 5287 /** 5288 * Driver initialization routine. 5289 * Invoked once at EAL init time. 5290 * Register itself as the [Poll Mode] Driver of PCI devices. 5291 */ 5292 RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd); 5293 RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map); 5294 RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci"); 5295 RTE_PMD_REGISTER_PARAM_STRING(net_ice, 5296 ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset>" 5297 ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>" 5298 ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>"); 5299 5300 RTE_LOG_REGISTER(ice_logtype_init, pmd.net.ice.init, NOTICE); 5301 RTE_LOG_REGISTER(ice_logtype_driver, pmd.net.ice.driver, NOTICE); 5302 #ifdef RTE_LIBRTE_ICE_DEBUG_RX 5303 RTE_LOG_REGISTER(ice_logtype_rx, pmd.net.ice.rx, DEBUG); 5304 #endif 5305 #ifdef RTE_LIBRTE_ICE_DEBUG_TX 5306 RTE_LOG_REGISTER(ice_logtype_tx, pmd.net.ice.tx, DEBUG); 5307 #endif 5308 #ifdef RTE_LIBRTE_ICE_DEBUG_TX_FREE 5309 RTE_LOG_REGISTER(ice_logtype_tx_free, pmd.net.ice.tx_free, DEBUG); 5310 #endif 5311
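/*
 * Illustrative usage sketch (editor's addition, not part of the driver).
 * It shows how an application might consume what this file registers: the
 * devargs declared in RTE_PMD_REGISTER_PARAM_STRING() above and the extended
 * statistics exposed through ice_xstats_get()/ice_xstats_get_names(). The
 * ICE_ETHDEV_USAGE_EXAMPLE guard and the PCI address below are hypothetical
 * placeholders; the rte_eth_* calls are the generic ethdev APIs. The block
 * stays under an undefined guard so it is never compiled with this file.
 *
 * Example devargs (placeholder PCI address, both flags are <0|1>):
 *   dpdk-testpmd -a 0000:3b:00.0,safe-mode-support=1,pipeline-mode-support=1 -- -i
 */
#ifdef ICE_ETHDEV_USAGE_EXAMPLE
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Print the firmware version and every xstat of one port. */
static void
ice_example_dump_port(uint16_t port_id)
{
	char fw_version[64];
	struct rte_eth_xstat_name *names = NULL;
	struct rte_eth_xstat *values = NULL;
	int nb, i;

	/* Serviced by ice_fw_version_get() for ports bound to this PMD. */
	if (rte_eth_dev_fw_version_get(port_id, fw_version,
				       sizeof(fw_version)) == 0)
		printf("port %u firmware: %s\n", port_id, fw_version);

	/* A NULL array asks only for the number of available xstats. */
	nb = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (nb <= 0)
		return;

	names = calloc(nb, sizeof(*names));
	values = calloc(nb, sizeof(*values));
	if (!names || !values)
		goto out;

	if (rte_eth_xstats_get_names(port_id, names, nb) != nb ||
	    rte_eth_xstats_get(port_id, values, nb) != nb)
		goto out;

	for (i = 0; i < nb; i++)
		printf("%s: %" PRIu64 "\n", names[i].name, values[i].value);

out:
	free(names);
	free(values);
}
#endif /* ICE_ETHDEV_USAGE_EXAMPLE */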