1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2018 Intel Corporation 3 */ 4 5 #include <rte_string_fns.h> 6 #include <ethdev_pci.h> 7 8 #include <stdio.h> 9 #include <sys/types.h> 10 #include <sys/stat.h> 11 #include <unistd.h> 12 13 #include <rte_tailq.h> 14 15 #include "eal_firmware.h" 16 17 #include "base/ice_sched.h" 18 #include "base/ice_flow.h" 19 #include "base/ice_dcb.h" 20 #include "base/ice_common.h" 21 #include "base/ice_ptp_hw.h" 22 23 #include "rte_pmd_ice.h" 24 #include "ice_ethdev.h" 25 #include "ice_rxtx.h" 26 #include "ice_generic_flow.h" 27 28 /* devargs */ 29 #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support" 30 #define ICE_PIPELINE_MODE_SUPPORT_ARG "pipeline-mode-support" 31 #define ICE_PROTO_XTR_ARG "proto_xtr" 32 #define ICE_HW_DEBUG_MASK_ARG "hw_debug_mask" 33 #define ICE_ONE_PPS_OUT_ARG "pps_out" 34 #define ICE_RX_LOW_LATENCY_ARG "rx_low_latency" 35 36 #define ICE_CYCLECOUNTER_MASK 0xffffffffffffffffULL 37 38 uint64_t ice_timestamp_dynflag; 39 int ice_timestamp_dynfield_offset = -1; 40 41 static const char * const ice_valid_args[] = { 42 ICE_SAFE_MODE_SUPPORT_ARG, 43 ICE_PIPELINE_MODE_SUPPORT_ARG, 44 ICE_PROTO_XTR_ARG, 45 ICE_HW_DEBUG_MASK_ARG, 46 ICE_ONE_PPS_OUT_ARG, 47 ICE_RX_LOW_LATENCY_ARG, 48 NULL 49 }; 50 51 #define PPS_OUT_DELAY_NS 1 52 53 static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = { 54 .name = "intel_pmd_dynfield_proto_xtr_metadata", 55 .size = sizeof(uint32_t), 56 .align = __alignof__(uint32_t), 57 .flags = 0, 58 }; 59 60 struct proto_xtr_ol_flag { 61 const struct rte_mbuf_dynflag param; 62 uint64_t *ol_flag; 63 bool required; 64 }; 65 66 static bool ice_proto_xtr_hw_support[PROTO_XTR_MAX]; 67 68 static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = { 69 [PROTO_XTR_VLAN] = { 70 .param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" }, 71 .ol_flag = &rte_net_ice_dynflag_proto_xtr_vlan_mask }, 72 [PROTO_XTR_IPV4] = { 73 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" }, 74 .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv4_mask }, 75 [PROTO_XTR_IPV6] = { 76 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" }, 77 .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_mask }, 78 [PROTO_XTR_IPV6_FLOW] = { 79 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" }, 80 .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask }, 81 [PROTO_XTR_TCP] = { 82 .param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" }, 83 .ol_flag = &rte_net_ice_dynflag_proto_xtr_tcp_mask }, 84 [PROTO_XTR_IP_OFFSET] = { 85 .param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" }, 86 .ol_flag = &rte_net_ice_dynflag_proto_xtr_ip_offset_mask }, 87 }; 88 89 #define ICE_OS_DEFAULT_PKG_NAME "ICE OS Default Package" 90 #define ICE_COMMS_PKG_NAME "ICE COMMS Package" 91 #define ICE_MAX_RES_DESC_NUM 1024 92 93 static int ice_dev_configure(struct rte_eth_dev *dev); 94 static int ice_dev_start(struct rte_eth_dev *dev); 95 static int ice_dev_stop(struct rte_eth_dev *dev); 96 static int ice_dev_close(struct rte_eth_dev *dev); 97 static int ice_dev_reset(struct rte_eth_dev *dev); 98 static int ice_dev_info_get(struct rte_eth_dev *dev, 99 struct rte_eth_dev_info *dev_info); 100 static int ice_link_update(struct rte_eth_dev *dev, 101 int wait_to_complete); 102 static int ice_dev_set_link_up(struct rte_eth_dev *dev); 103 static int ice_dev_set_link_down(struct rte_eth_dev *dev); 104 105 static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 106 static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask); 107 static int 
ice_rss_reta_update(struct rte_eth_dev *dev, 108 struct rte_eth_rss_reta_entry64 *reta_conf, 109 uint16_t reta_size); 110 static int ice_rss_reta_query(struct rte_eth_dev *dev, 111 struct rte_eth_rss_reta_entry64 *reta_conf, 112 uint16_t reta_size); 113 static int ice_rss_hash_update(struct rte_eth_dev *dev, 114 struct rte_eth_rss_conf *rss_conf); 115 static int ice_rss_hash_conf_get(struct rte_eth_dev *dev, 116 struct rte_eth_rss_conf *rss_conf); 117 static int ice_promisc_enable(struct rte_eth_dev *dev); 118 static int ice_promisc_disable(struct rte_eth_dev *dev); 119 static int ice_allmulti_enable(struct rte_eth_dev *dev); 120 static int ice_allmulti_disable(struct rte_eth_dev *dev); 121 static int ice_vlan_filter_set(struct rte_eth_dev *dev, 122 uint16_t vlan_id, 123 int on); 124 static int ice_macaddr_set(struct rte_eth_dev *dev, 125 struct rte_ether_addr *mac_addr); 126 static int ice_macaddr_add(struct rte_eth_dev *dev, 127 struct rte_ether_addr *mac_addr, 128 __rte_unused uint32_t index, 129 uint32_t pool); 130 static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index); 131 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev, 132 uint16_t queue_id); 133 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev, 134 uint16_t queue_id); 135 static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, 136 size_t fw_size); 137 static int ice_vlan_pvid_set(struct rte_eth_dev *dev, 138 uint16_t pvid, int on); 139 static int ice_get_eeprom_length(struct rte_eth_dev *dev); 140 static int ice_get_eeprom(struct rte_eth_dev *dev, 141 struct rte_dev_eeprom_info *eeprom); 142 static int ice_stats_get(struct rte_eth_dev *dev, 143 struct rte_eth_stats *stats); 144 static int ice_stats_reset(struct rte_eth_dev *dev); 145 static int ice_xstats_get(struct rte_eth_dev *dev, 146 struct rte_eth_xstat *xstats, unsigned int n); 147 static int ice_xstats_get_names(struct rte_eth_dev *dev, 148 struct rte_eth_xstat_name *xstats_names, 149 unsigned int limit); 150 static int ice_dev_flow_ops_get(struct rte_eth_dev *dev, 151 const struct rte_flow_ops **ops); 152 static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 153 struct rte_eth_udp_tunnel *udp_tunnel); 154 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 155 struct rte_eth_udp_tunnel *udp_tunnel); 156 static int ice_timesync_enable(struct rte_eth_dev *dev); 157 static int ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 158 struct timespec *timestamp, 159 uint32_t flags); 160 static int ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 161 struct timespec *timestamp); 162 static int ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); 163 static int ice_timesync_read_time(struct rte_eth_dev *dev, 164 struct timespec *timestamp); 165 static int ice_timesync_write_time(struct rte_eth_dev *dev, 166 const struct timespec *timestamp); 167 static int ice_timesync_disable(struct rte_eth_dev *dev); 168 169 static const struct rte_pci_id pci_id_ice_map[] = { 170 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) }, 171 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP) }, 172 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T) }, 173 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE) }, 174 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP) }, 175 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) }, 176 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) }, 177 { 
RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) }, 178 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) }, 179 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) }, 180 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) }, 181 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_BACKPLANE) }, 182 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_QSFP) }, 183 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SFP) }, 184 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_10G_BASE_T) }, 185 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SGMII) }, 186 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE) }, 187 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP) }, 188 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) }, 189 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T) }, 190 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII) }, 191 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE) }, 192 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP) }, 193 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T) }, 194 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII) }, 195 { .vendor_id = 0, /* sentinel */ }, 196 }; 197 198 static const struct eth_dev_ops ice_eth_dev_ops = { 199 .dev_configure = ice_dev_configure, 200 .dev_start = ice_dev_start, 201 .dev_stop = ice_dev_stop, 202 .dev_close = ice_dev_close, 203 .dev_reset = ice_dev_reset, 204 .dev_set_link_up = ice_dev_set_link_up, 205 .dev_set_link_down = ice_dev_set_link_down, 206 .rx_queue_start = ice_rx_queue_start, 207 .rx_queue_stop = ice_rx_queue_stop, 208 .tx_queue_start = ice_tx_queue_start, 209 .tx_queue_stop = ice_tx_queue_stop, 210 .rx_queue_setup = ice_rx_queue_setup, 211 .rx_queue_release = ice_dev_rx_queue_release, 212 .tx_queue_setup = ice_tx_queue_setup, 213 .tx_queue_release = ice_dev_tx_queue_release, 214 .dev_infos_get = ice_dev_info_get, 215 .dev_supported_ptypes_get = ice_dev_supported_ptypes_get, 216 .link_update = ice_link_update, 217 .mtu_set = ice_mtu_set, 218 .mac_addr_set = ice_macaddr_set, 219 .mac_addr_add = ice_macaddr_add, 220 .mac_addr_remove = ice_macaddr_remove, 221 .vlan_filter_set = ice_vlan_filter_set, 222 .vlan_offload_set = ice_vlan_offload_set, 223 .reta_update = ice_rss_reta_update, 224 .reta_query = ice_rss_reta_query, 225 .rss_hash_update = ice_rss_hash_update, 226 .rss_hash_conf_get = ice_rss_hash_conf_get, 227 .promiscuous_enable = ice_promisc_enable, 228 .promiscuous_disable = ice_promisc_disable, 229 .allmulticast_enable = ice_allmulti_enable, 230 .allmulticast_disable = ice_allmulti_disable, 231 .rx_queue_intr_enable = ice_rx_queue_intr_enable, 232 .rx_queue_intr_disable = ice_rx_queue_intr_disable, 233 .fw_version_get = ice_fw_version_get, 234 .vlan_pvid_set = ice_vlan_pvid_set, 235 .rxq_info_get = ice_rxq_info_get, 236 .txq_info_get = ice_txq_info_get, 237 .rx_burst_mode_get = ice_rx_burst_mode_get, 238 .tx_burst_mode_get = ice_tx_burst_mode_get, 239 .get_eeprom_length = ice_get_eeprom_length, 240 .get_eeprom = ice_get_eeprom, 241 .stats_get = ice_stats_get, 242 .stats_reset = ice_stats_reset, 243 .xstats_get = ice_xstats_get, 244 .xstats_get_names = ice_xstats_get_names, 245 .xstats_reset = ice_stats_reset, 246 .flow_ops_get = ice_dev_flow_ops_get, 247 .udp_tunnel_port_add = ice_dev_udp_tunnel_port_add, 248 .udp_tunnel_port_del = ice_dev_udp_tunnel_port_del, 249 .tx_done_cleanup = ice_tx_done_cleanup, 250 
.get_monitor_addr = ice_get_monitor_addr, 251 .timesync_enable = ice_timesync_enable, 252 .timesync_read_rx_timestamp = ice_timesync_read_rx_timestamp, 253 .timesync_read_tx_timestamp = ice_timesync_read_tx_timestamp, 254 .timesync_adjust_time = ice_timesync_adjust_time, 255 .timesync_read_time = ice_timesync_read_time, 256 .timesync_write_time = ice_timesync_write_time, 257 .timesync_disable = ice_timesync_disable, 258 }; 259 260 /* store statistics names and its offset in stats structure */ 261 struct ice_xstats_name_off { 262 char name[RTE_ETH_XSTATS_NAME_SIZE]; 263 unsigned int offset; 264 }; 265 266 static const struct ice_xstats_name_off ice_stats_strings[] = { 267 {"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)}, 268 {"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)}, 269 {"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)}, 270 {"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)}, 271 {"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats, 272 rx_unknown_protocol)}, 273 {"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)}, 274 {"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)}, 275 {"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)}, 276 {"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)}, 277 }; 278 279 #define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \ 280 sizeof(ice_stats_strings[0])) 281 282 static const struct ice_xstats_name_off ice_hw_port_strings[] = { 283 {"tx_link_down_dropped", offsetof(struct ice_hw_port_stats, 284 tx_dropped_link_down)}, 285 {"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)}, 286 {"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats, 287 illegal_bytes)}, 288 {"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)}, 289 {"mac_local_errors", offsetof(struct ice_hw_port_stats, 290 mac_local_faults)}, 291 {"mac_remote_errors", offsetof(struct ice_hw_port_stats, 292 mac_remote_faults)}, 293 {"rx_len_errors", offsetof(struct ice_hw_port_stats, 294 rx_len_errors)}, 295 {"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)}, 296 {"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)}, 297 {"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)}, 298 {"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)}, 299 {"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)}, 300 {"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats, 301 rx_size_127)}, 302 {"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats, 303 rx_size_255)}, 304 {"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats, 305 rx_size_511)}, 306 {"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats, 307 rx_size_1023)}, 308 {"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats, 309 rx_size_1522)}, 310 {"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats, 311 rx_size_big)}, 312 {"rx_undersized_errors", offsetof(struct ice_hw_port_stats, 313 rx_undersize)}, 314 {"rx_oversize_errors", offsetof(struct ice_hw_port_stats, 315 rx_oversize)}, 316 {"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats, 317 mac_short_pkt_dropped)}, 318 {"rx_fragmented_errors", offsetof(struct ice_hw_port_stats, 319 rx_fragments)}, 320 {"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)}, 321 {"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)}, 322 {"tx_size_65_to_127_packets", 
offsetof(struct ice_hw_port_stats, 323 tx_size_127)}, 324 {"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats, 325 tx_size_255)}, 326 {"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats, 327 tx_size_511)}, 328 {"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats, 329 tx_size_1023)}, 330 {"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats, 331 tx_size_1522)}, 332 {"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats, 333 tx_size_big)}, 334 }; 335 336 #define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \ 337 sizeof(ice_hw_port_strings[0])) 338 339 static void 340 ice_init_controlq_parameter(struct ice_hw *hw) 341 { 342 /* fields for adminq */ 343 hw->adminq.num_rq_entries = ICE_ADMINQ_LEN; 344 hw->adminq.num_sq_entries = ICE_ADMINQ_LEN; 345 hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ; 346 hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ; 347 348 /* fields for mailboxq, DPDK used as PF host */ 349 hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN; 350 hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN; 351 hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ; 352 hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ; 353 354 /* fields for sideband queue */ 355 hw->sbq.num_rq_entries = ICE_SBQ_LEN; 356 hw->sbq.num_sq_entries = ICE_SBQ_LEN; 357 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN; 358 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN; 359 360 } 361 362 static int 363 lookup_proto_xtr_type(const char *xtr_name) 364 { 365 static struct { 366 const char *name; 367 enum proto_xtr_type type; 368 } xtr_type_map[] = { 369 { "vlan", PROTO_XTR_VLAN }, 370 { "ipv4", PROTO_XTR_IPV4 }, 371 { "ipv6", PROTO_XTR_IPV6 }, 372 { "ipv6_flow", PROTO_XTR_IPV6_FLOW }, 373 { "tcp", PROTO_XTR_TCP }, 374 { "ip_offset", PROTO_XTR_IP_OFFSET }, 375 }; 376 uint32_t i; 377 378 for (i = 0; i < RTE_DIM(xtr_type_map); i++) { 379 if (strcmp(xtr_name, xtr_type_map[i].name) == 0) 380 return xtr_type_map[i].type; 381 } 382 383 return -1; 384 } 385 386 /* 387 * Parse elem, the elem could be single number/range or '(' ')' group 388 * 1) A single number elem, it's just a simple digit. e.g. 9 389 * 2) A single range elem, two digits with a '-' between. e.g. 2-6 390 * 3) A group elem, combines multiple 1) or 2) with '( )'. e.g (0,2-4,6) 391 * Within group elem, '-' used for a range separator; 392 * ',' used for a single number. 
393 */ 394 static int 395 parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs) 396 { 397 const char *str = input; 398 char *end = NULL; 399 uint32_t min, max; 400 uint32_t idx; 401 402 while (isblank(*str)) 403 str++; 404 405 if (!isdigit(*str) && *str != '(') 406 return -1; 407 408 /* process single number or single range of number */ 409 if (*str != '(') { 410 errno = 0; 411 idx = strtoul(str, &end, 10); 412 if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM) 413 return -1; 414 415 while (isblank(*end)) 416 end++; 417 418 min = idx; 419 max = idx; 420 421 /* process single <number>-<number> */ 422 if (*end == '-') { 423 end++; 424 while (isblank(*end)) 425 end++; 426 if (!isdigit(*end)) 427 return -1; 428 429 errno = 0; 430 idx = strtoul(end, &end, 10); 431 if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM) 432 return -1; 433 434 max = idx; 435 while (isblank(*end)) 436 end++; 437 } 438 439 if (*end != ':') 440 return -1; 441 442 for (idx = RTE_MIN(min, max); 443 idx <= RTE_MAX(min, max); idx++) 444 devargs->proto_xtr[idx] = xtr_type; 445 446 return 0; 447 } 448 449 /* process set within bracket */ 450 str++; 451 while (isblank(*str)) 452 str++; 453 if (*str == '\0') 454 return -1; 455 456 min = ICE_MAX_QUEUE_NUM; 457 do { 458 /* go ahead to the first digit */ 459 while (isblank(*str)) 460 str++; 461 if (!isdigit(*str)) 462 return -1; 463 464 /* get the digit value */ 465 errno = 0; 466 idx = strtoul(str, &end, 10); 467 if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM) 468 return -1; 469 470 /* go ahead to separator '-',',' and ')' */ 471 while (isblank(*end)) 472 end++; 473 if (*end == '-') { 474 if (min == ICE_MAX_QUEUE_NUM) 475 min = idx; 476 else /* avoid continuous '-' */ 477 return -1; 478 } else if (*end == ',' || *end == ')') { 479 max = idx; 480 if (min == ICE_MAX_QUEUE_NUM) 481 min = idx; 482 483 for (idx = RTE_MIN(min, max); 484 idx <= RTE_MAX(min, max); idx++) 485 devargs->proto_xtr[idx] = xtr_type; 486 487 min = ICE_MAX_QUEUE_NUM; 488 } else { 489 return -1; 490 } 491 492 str = end + 1; 493 } while (*end != ')' && *end != '\0'); 494 495 return 0; 496 } 497 498 static int 499 parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs) 500 { 501 const char *queue_start; 502 uint32_t idx; 503 int xtr_type; 504 char xtr_name[32]; 505 506 while (isblank(*queues)) 507 queues++; 508 509 if (*queues != '[') { 510 xtr_type = lookup_proto_xtr_type(queues); 511 if (xtr_type < 0) 512 return -1; 513 514 devargs->proto_xtr_dflt = xtr_type; 515 516 return 0; 517 } 518 519 queues++; 520 do { 521 while (isblank(*queues)) 522 queues++; 523 if (*queues == '\0') 524 return -1; 525 526 queue_start = queues; 527 528 /* go across a complete bracket */ 529 if (*queue_start == '(') { 530 queues += strcspn(queues, ")"); 531 if (*queues != ')') 532 return -1; 533 } 534 535 /* scan the separator ':' */ 536 queues += strcspn(queues, ":"); 537 if (*queues++ != ':') 538 return -1; 539 while (isblank(*queues)) 540 queues++; 541 542 for (idx = 0; ; idx++) { 543 if (isblank(queues[idx]) || 544 queues[idx] == ',' || 545 queues[idx] == ']' || 546 queues[idx] == '\0') 547 break; 548 549 if (idx > sizeof(xtr_name) - 2) 550 return -1; 551 552 xtr_name[idx] = queues[idx]; 553 } 554 xtr_name[idx] = '\0'; 555 xtr_type = lookup_proto_xtr_type(xtr_name); 556 if (xtr_type < 0) 557 return -1; 558 559 queues += idx; 560 561 while (isblank(*queues) || *queues == ',' || *queues == ']') 562 queues++; 563 564 if (parse_queue_set(queue_start, xtr_type, devargs) < 0) 565 return -1; 
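/*
 * Illustrative devargs for the parsers above (a sketch derived from the
 * grammar in the comment before parse_queue_set(); see the ice NIC guide
 * for the authoritative syntax and shell quoting):
 *
 *   proto_xtr=vlan
 *       apply VLAN extraction to all queues (no '[' ']' queue list, so
 *       only proto_xtr_dflt is set);
 *   proto_xtr='[(1,2-3,8-9):tcp,10-13:vlan]'
 *       queues 1, 2-3 and 8-9 get TCP extraction, queues 10-13 get VLAN.
 *
 * Each "<queue set>:<type>" element is handled by parse_queue_set(), where
 * the queue set is a single number, a range "a-b" or a '(' ')' group of
 * both, and <type> is one of the names in xtr_type_map.
 */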
566 } while (*queues != '\0'); 567 568 return 0; 569 } 570 571 static int 572 handle_proto_xtr_arg(__rte_unused const char *key, const char *value, 573 void *extra_args) 574 { 575 struct ice_devargs *devargs = extra_args; 576 577 if (value == NULL || extra_args == NULL) 578 return -EINVAL; 579 580 if (parse_queue_proto_xtr(value, devargs) < 0) { 581 PMD_DRV_LOG(ERR, 582 "The protocol extraction parameter is wrong : '%s'", 583 value); 584 return -1; 585 } 586 587 return 0; 588 } 589 590 static void 591 ice_check_proto_xtr_support(struct ice_hw *hw) 592 { 593 #define FLX_REG(val, fld, idx) \ 594 (((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \ 595 GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S) 596 static struct { 597 uint32_t rxdid; 598 uint8_t opcode; 599 uint8_t protid_0; 600 uint8_t protid_1; 601 } xtr_sets[] = { 602 [PROTO_XTR_VLAN] = { ICE_RXDID_COMMS_AUX_VLAN, 603 ICE_RX_OPC_EXTRACT, 604 ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O}, 605 [PROTO_XTR_IPV4] = { ICE_RXDID_COMMS_AUX_IPV4, 606 ICE_RX_OPC_EXTRACT, 607 ICE_PROT_IPV4_OF_OR_S, 608 ICE_PROT_IPV4_OF_OR_S }, 609 [PROTO_XTR_IPV6] = { ICE_RXDID_COMMS_AUX_IPV6, 610 ICE_RX_OPC_EXTRACT, 611 ICE_PROT_IPV6_OF_OR_S, 612 ICE_PROT_IPV6_OF_OR_S }, 613 [PROTO_XTR_IPV6_FLOW] = { ICE_RXDID_COMMS_AUX_IPV6_FLOW, 614 ICE_RX_OPC_EXTRACT, 615 ICE_PROT_IPV6_OF_OR_S, 616 ICE_PROT_IPV6_OF_OR_S }, 617 [PROTO_XTR_TCP] = { ICE_RXDID_COMMS_AUX_TCP, 618 ICE_RX_OPC_EXTRACT, 619 ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL }, 620 [PROTO_XTR_IP_OFFSET] = { ICE_RXDID_COMMS_AUX_IP_OFFSET, 621 ICE_RX_OPC_PROTID, 622 ICE_PROT_IPV4_OF_OR_S, 623 ICE_PROT_IPV6_OF_OR_S }, 624 }; 625 uint32_t i; 626 627 for (i = 0; i < RTE_DIM(xtr_sets); i++) { 628 uint32_t rxdid = xtr_sets[i].rxdid; 629 uint32_t v; 630 631 if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) { 632 v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid)); 633 634 if (FLX_REG(v, PROT_MDID, 4) == xtr_sets[i].protid_0 && 635 FLX_REG(v, RXDID_OPCODE, 4) == xtr_sets[i].opcode) 636 ice_proto_xtr_hw_support[i] = true; 637 } 638 639 if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) { 640 v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid)); 641 642 if (FLX_REG(v, PROT_MDID, 5) == xtr_sets[i].protid_1 && 643 FLX_REG(v, RXDID_OPCODE, 5) == xtr_sets[i].opcode) 644 ice_proto_xtr_hw_support[i] = true; 645 } 646 } 647 } 648 649 static int 650 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base, 651 uint32_t num) 652 { 653 struct pool_entry *entry; 654 655 if (!pool || !num) 656 return -EINVAL; 657 658 entry = rte_zmalloc(NULL, sizeof(*entry), 0); 659 if (!entry) { 660 PMD_INIT_LOG(ERR, 661 "Failed to allocate memory for resource pool"); 662 return -ENOMEM; 663 } 664 665 /* queue heap initialize */ 666 pool->num_free = num; 667 pool->num_alloc = 0; 668 pool->base = base; 669 LIST_INIT(&pool->alloc_list); 670 LIST_INIT(&pool->free_list); 671 672 /* Initialize element */ 673 entry->base = 0; 674 entry->len = num; 675 676 LIST_INSERT_HEAD(&pool->free_list, entry, next); 677 return 0; 678 } 679 680 static int 681 ice_res_pool_alloc(struct ice_res_pool_info *pool, 682 uint16_t num) 683 { 684 struct pool_entry *entry, *valid_entry; 685 686 if (!pool || !num) { 687 PMD_INIT_LOG(ERR, "Invalid parameter"); 688 return -EINVAL; 689 } 690 691 if (pool->num_free < num) { 692 PMD_INIT_LOG(ERR, "No resource. 
ask:%u, available:%u", 693 num, pool->num_free); 694 return -ENOMEM; 695 } 696 697 valid_entry = NULL; 698 /* Lookup in free list and find most fit one */ 699 LIST_FOREACH(entry, &pool->free_list, next) { 700 if (entry->len >= num) { 701 /* Find best one */ 702 if (entry->len == num) { 703 valid_entry = entry; 704 break; 705 } 706 if (!valid_entry || 707 valid_entry->len > entry->len) 708 valid_entry = entry; 709 } 710 } 711 712 /* Not find one to satisfy the request, return */ 713 if (!valid_entry) { 714 PMD_INIT_LOG(ERR, "No valid entry found"); 715 return -ENOMEM; 716 } 717 /** 718 * The entry have equal queue number as requested, 719 * remove it from alloc_list. 720 */ 721 if (valid_entry->len == num) { 722 LIST_REMOVE(valid_entry, next); 723 } else { 724 /** 725 * The entry have more numbers than requested, 726 * create a new entry for alloc_list and minus its 727 * queue base and number in free_list. 728 */ 729 entry = rte_zmalloc(NULL, sizeof(*entry), 0); 730 if (!entry) { 731 PMD_INIT_LOG(ERR, 732 "Failed to allocate memory for " 733 "resource pool"); 734 return -ENOMEM; 735 } 736 entry->base = valid_entry->base; 737 entry->len = num; 738 valid_entry->base += num; 739 valid_entry->len -= num; 740 valid_entry = entry; 741 } 742 743 /* Insert it into alloc list, not sorted */ 744 LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next); 745 746 pool->num_free -= valid_entry->len; 747 pool->num_alloc += valid_entry->len; 748 749 return valid_entry->base + pool->base; 750 } 751 752 static void 753 ice_res_pool_destroy(struct ice_res_pool_info *pool) 754 { 755 struct pool_entry *entry, *next_entry; 756 757 if (!pool) 758 return; 759 760 for (entry = LIST_FIRST(&pool->alloc_list); 761 entry && (next_entry = LIST_NEXT(entry, next), 1); 762 entry = next_entry) { 763 LIST_REMOVE(entry, next); 764 rte_free(entry); 765 } 766 767 for (entry = LIST_FIRST(&pool->free_list); 768 entry && (next_entry = LIST_NEXT(entry, next), 1); 769 entry = next_entry) { 770 LIST_REMOVE(entry, next); 771 rte_free(entry); 772 } 773 774 pool->num_free = 0; 775 pool->num_alloc = 0; 776 pool->base = 0; 777 LIST_INIT(&pool->alloc_list); 778 LIST_INIT(&pool->free_list); 779 } 780 781 static void 782 ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info) 783 { 784 /* Set VSI LUT selection */ 785 info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI & 786 ICE_AQ_VSI_Q_OPT_RSS_LUT_M; 787 /* Set Hash scheme */ 788 info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ & 789 ICE_AQ_VSI_Q_OPT_RSS_HASH_M; 790 /* enable TC */ 791 info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M; 792 } 793 794 static enum ice_status 795 ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi, 796 struct ice_aqc_vsi_props *info, 797 uint8_t enabled_tcmap) 798 { 799 uint16_t bsf, qp_idx; 800 801 /* default tc 0 now. Multi-TC supporting need to be done later. 802 * Configure TC and queue mapping parameters, for enabled TC, 803 * allocate qpnum_per_tc queues to this traffic. 
804 */ 805 if (enabled_tcmap != 0x01) { 806 PMD_INIT_LOG(ERR, "only TC0 is supported"); 807 return -ENOTSUP; 808 } 809 810 vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC); 811 bsf = rte_bsf32(vsi->nb_qps); 812 /* Adjust the queue number to actual queues that can be applied */ 813 vsi->nb_qps = 0x1 << bsf; 814 815 qp_idx = 0; 816 /* Set tc and queue mapping with VSI */ 817 info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx << 818 ICE_AQ_VSI_TC_Q_OFFSET_S) | 819 (bsf << ICE_AQ_VSI_TC_Q_NUM_S)); 820 821 /* Associate queue number with VSI */ 822 info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG); 823 info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue); 824 info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps); 825 info->valid_sections |= 826 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); 827 /* Set the info.ingress_table and info.egress_table 828 * for UP translate table. Now just set it to 1:1 map by default 829 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688 830 */ 831 #define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688 832 info->ingress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT); 833 info->egress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT); 834 info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT); 835 return 0; 836 } 837 838 static int 839 ice_init_mac_address(struct rte_eth_dev *dev) 840 { 841 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 842 843 if (!rte_is_unicast_ether_addr 844 ((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) { 845 PMD_INIT_LOG(ERR, "Invalid MAC address"); 846 return -EINVAL; 847 } 848 849 rte_ether_addr_copy( 850 (struct rte_ether_addr *)hw->port_info[0].mac.lan_addr, 851 (struct rte_ether_addr *)hw->port_info[0].mac.perm_addr); 852 853 dev->data->mac_addrs = 854 rte_zmalloc(NULL, sizeof(struct rte_ether_addr) * ICE_NUM_MACADDR_MAX, 0); 855 if (!dev->data->mac_addrs) { 856 PMD_INIT_LOG(ERR, 857 "Failed to allocate memory to store mac address"); 858 return -ENOMEM; 859 } 860 /* store it to dev data */ 861 rte_ether_addr_copy( 862 (struct rte_ether_addr *)hw->port_info[0].mac.perm_addr, 863 &dev->data->mac_addrs[0]); 864 return 0; 865 } 866 867 /* Find out specific MAC filter */ 868 static struct ice_mac_filter * 869 ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr) 870 { 871 struct ice_mac_filter *f; 872 873 TAILQ_FOREACH(f, &vsi->mac_list, next) { 874 if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr)) 875 return f; 876 } 877 878 return NULL; 879 } 880 881 static int 882 ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr) 883 { 884 struct ice_fltr_list_entry *m_list_itr = NULL; 885 struct ice_mac_filter *f; 886 struct LIST_HEAD_TYPE list_head; 887 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 888 int ret = 0; 889 890 /* If it's added and configured, return */ 891 f = ice_find_mac_filter(vsi, mac_addr); 892 if (f) { 893 PMD_DRV_LOG(INFO, "This MAC filter already exists."); 894 return 0; 895 } 896 897 INIT_LIST_HEAD(&list_head); 898 899 m_list_itr = (struct ice_fltr_list_entry *) 900 ice_malloc(hw, sizeof(*m_list_itr)); 901 if (!m_list_itr) { 902 ret = -ENOMEM; 903 goto DONE; 904 } 905 ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr, 906 mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA); 907 m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI; 908 m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI; 909 m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC; 910 m_list_itr->fltr_info.flag = ICE_FLTR_TX; 911 m_list_itr->fltr_info.vsi_handle = vsi->idx; 912 913 
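/*
 * The temporary filter entry filled in above is linked into a local list
 * and handed to the shared switch code: ice_add_mac() programs the rule,
 * and only on success is the address also tracked in the VSI's SW mac_list
 * so ice_find_mac_filter()/ice_remove_mac_filter() can find it later. The
 * list entry itself is always freed at DONE, whether or not the add
 * succeeded.
 */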
LIST_ADD(&m_list_itr->list_entry, &list_head); 914 915 /* Add the mac */ 916 ret = ice_add_mac(hw, &list_head); 917 if (ret != ICE_SUCCESS) { 918 PMD_DRV_LOG(ERR, "Failed to add MAC filter"); 919 ret = -EINVAL; 920 goto DONE; 921 } 922 /* Add the mac addr into mac list */ 923 f = rte_zmalloc(NULL, sizeof(*f), 0); 924 if (!f) { 925 PMD_DRV_LOG(ERR, "failed to allocate memory"); 926 ret = -ENOMEM; 927 goto DONE; 928 } 929 rte_ether_addr_copy(mac_addr, &f->mac_info.mac_addr); 930 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next); 931 vsi->mac_num++; 932 933 ret = 0; 934 935 DONE: 936 rte_free(m_list_itr); 937 return ret; 938 } 939 940 static int 941 ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr) 942 { 943 struct ice_fltr_list_entry *m_list_itr = NULL; 944 struct ice_mac_filter *f; 945 struct LIST_HEAD_TYPE list_head; 946 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 947 int ret = 0; 948 949 /* Can't find it, return an error */ 950 f = ice_find_mac_filter(vsi, mac_addr); 951 if (!f) 952 return -EINVAL; 953 954 INIT_LIST_HEAD(&list_head); 955 956 m_list_itr = (struct ice_fltr_list_entry *) 957 ice_malloc(hw, sizeof(*m_list_itr)); 958 if (!m_list_itr) { 959 ret = -ENOMEM; 960 goto DONE; 961 } 962 ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr, 963 mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA); 964 m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI; 965 m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI; 966 m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC; 967 m_list_itr->fltr_info.flag = ICE_FLTR_TX; 968 m_list_itr->fltr_info.vsi_handle = vsi->idx; 969 970 LIST_ADD(&m_list_itr->list_entry, &list_head); 971 972 /* remove the mac filter */ 973 ret = ice_remove_mac(hw, &list_head); 974 if (ret != ICE_SUCCESS) { 975 PMD_DRV_LOG(ERR, "Failed to remove MAC filter"); 976 ret = -EINVAL; 977 goto DONE; 978 } 979 980 /* Remove the mac addr from mac list */ 981 TAILQ_REMOVE(&vsi->mac_list, f, next); 982 rte_free(f); 983 vsi->mac_num--; 984 985 ret = 0; 986 DONE: 987 rte_free(m_list_itr); 988 return ret; 989 } 990 991 /* Find out specific VLAN filter */ 992 static struct ice_vlan_filter * 993 ice_find_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan) 994 { 995 struct ice_vlan_filter *f; 996 997 TAILQ_FOREACH(f, &vsi->vlan_list, next) { 998 if (vlan->tpid == f->vlan_info.vlan.tpid && 999 vlan->vid == f->vlan_info.vlan.vid) 1000 return f; 1001 } 1002 1003 return NULL; 1004 } 1005 1006 static int 1007 ice_add_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan) 1008 { 1009 struct ice_fltr_list_entry *v_list_itr = NULL; 1010 struct ice_vlan_filter *f; 1011 struct LIST_HEAD_TYPE list_head; 1012 struct ice_hw *hw; 1013 int ret = 0; 1014 1015 if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID) 1016 return -EINVAL; 1017 1018 hw = ICE_VSI_TO_HW(vsi); 1019 1020 /* If it's added and configured, return. 
*/ 1021 f = ice_find_vlan_filter(vsi, vlan); 1022 if (f) { 1023 PMD_DRV_LOG(INFO, "This VLAN filter already exists."); 1024 return 0; 1025 } 1026 1027 if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) 1028 return 0; 1029 1030 INIT_LIST_HEAD(&list_head); 1031 1032 v_list_itr = (struct ice_fltr_list_entry *) 1033 ice_malloc(hw, sizeof(*v_list_itr)); 1034 if (!v_list_itr) { 1035 ret = -ENOMEM; 1036 goto DONE; 1037 } 1038 v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid; 1039 v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid; 1040 v_list_itr->fltr_info.l_data.vlan.tpid_valid = true; 1041 v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI; 1042 v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI; 1043 v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; 1044 v_list_itr->fltr_info.flag = ICE_FLTR_TX; 1045 v_list_itr->fltr_info.vsi_handle = vsi->idx; 1046 1047 LIST_ADD(&v_list_itr->list_entry, &list_head); 1048 1049 /* Add the vlan */ 1050 ret = ice_add_vlan(hw, &list_head); 1051 if (ret != ICE_SUCCESS) { 1052 PMD_DRV_LOG(ERR, "Failed to add VLAN filter"); 1053 ret = -EINVAL; 1054 goto DONE; 1055 } 1056 1057 /* Add vlan into vlan list */ 1058 f = rte_zmalloc(NULL, sizeof(*f), 0); 1059 if (!f) { 1060 PMD_DRV_LOG(ERR, "failed to allocate memory"); 1061 ret = -ENOMEM; 1062 goto DONE; 1063 } 1064 f->vlan_info.vlan.tpid = vlan->tpid; 1065 f->vlan_info.vlan.vid = vlan->vid; 1066 TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next); 1067 vsi->vlan_num++; 1068 1069 ret = 0; 1070 1071 DONE: 1072 rte_free(v_list_itr); 1073 return ret; 1074 } 1075 1076 static int 1077 ice_remove_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan) 1078 { 1079 struct ice_fltr_list_entry *v_list_itr = NULL; 1080 struct ice_vlan_filter *f; 1081 struct LIST_HEAD_TYPE list_head; 1082 struct ice_hw *hw; 1083 int ret = 0; 1084 1085 if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID) 1086 return -EINVAL; 1087 1088 hw = ICE_VSI_TO_HW(vsi); 1089 1090 /* Can't find it, return an error */ 1091 f = ice_find_vlan_filter(vsi, vlan); 1092 if (!f) 1093 return -EINVAL; 1094 1095 INIT_LIST_HEAD(&list_head); 1096 1097 v_list_itr = (struct ice_fltr_list_entry *) 1098 ice_malloc(hw, sizeof(*v_list_itr)); 1099 if (!v_list_itr) { 1100 ret = -ENOMEM; 1101 goto DONE; 1102 } 1103 1104 v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid; 1105 v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid; 1106 v_list_itr->fltr_info.l_data.vlan.tpid_valid = true; 1107 v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI; 1108 v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI; 1109 v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; 1110 v_list_itr->fltr_info.flag = ICE_FLTR_TX; 1111 v_list_itr->fltr_info.vsi_handle = vsi->idx; 1112 1113 LIST_ADD(&v_list_itr->list_entry, &list_head); 1114 1115 /* remove the vlan filter */ 1116 ret = ice_remove_vlan(hw, &list_head); 1117 if (ret != ICE_SUCCESS) { 1118 PMD_DRV_LOG(ERR, "Failed to remove VLAN filter"); 1119 ret = -EINVAL; 1120 goto DONE; 1121 } 1122 1123 /* Remove the vlan id from vlan list */ 1124 TAILQ_REMOVE(&vsi->vlan_list, f, next); 1125 rte_free(f); 1126 vsi->vlan_num--; 1127 1128 ret = 0; 1129 DONE: 1130 rte_free(v_list_itr); 1131 return ret; 1132 } 1133 1134 static int 1135 ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi) 1136 { 1137 struct ice_mac_filter *m_f; 1138 struct ice_vlan_filter *v_f; 1139 void *temp; 1140 int ret = 0; 1141 1142 if (!vsi || !vsi->mac_num) 1143 return -EINVAL; 1144 1145 RTE_TAILQ_FOREACH_SAFE(m_f, &vsi->mac_list, next, temp) { 1146 ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr); 1147 
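/*
 * ice_remove_mac_filter() unlinks and frees the current node, which is why
 * the _SAFE iterator is used: 'temp' caches the next element before the
 * loop body runs, so traversal stays valid after the free. The VLAN loop
 * below relies on the same pattern.
 */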
if (ret != ICE_SUCCESS) { 1148 ret = -EINVAL; 1149 goto DONE; 1150 } 1151 } 1152 1153 if (vsi->vlan_num == 0) 1154 return 0; 1155 1156 RTE_TAILQ_FOREACH_SAFE(v_f, &vsi->vlan_list, next, temp) { 1157 ret = ice_remove_vlan_filter(vsi, &v_f->vlan_info.vlan); 1158 if (ret != ICE_SUCCESS) { 1159 ret = -EINVAL; 1160 goto DONE; 1161 } 1162 } 1163 1164 DONE: 1165 return ret; 1166 } 1167 1168 /* Enable IRQ0 */ 1169 static void 1170 ice_pf_enable_irq0(struct ice_hw *hw) 1171 { 1172 /* reset the registers */ 1173 ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0); 1174 ICE_READ_REG(hw, PFINT_OICR); 1175 1176 #ifdef ICE_LSE_SPT 1177 ICE_WRITE_REG(hw, PFINT_OICR_ENA, 1178 (uint32_t)(PFINT_OICR_ENA_INT_ENA_M & 1179 (~PFINT_OICR_LINK_STAT_CHANGE_M))); 1180 1181 ICE_WRITE_REG(hw, PFINT_OICR_CTL, 1182 (0 & PFINT_OICR_CTL_MSIX_INDX_M) | 1183 ((0 << PFINT_OICR_CTL_ITR_INDX_S) & 1184 PFINT_OICR_CTL_ITR_INDX_M) | 1185 PFINT_OICR_CTL_CAUSE_ENA_M); 1186 1187 ICE_WRITE_REG(hw, PFINT_FW_CTL, 1188 (0 & PFINT_FW_CTL_MSIX_INDX_M) | 1189 ((0 << PFINT_FW_CTL_ITR_INDX_S) & 1190 PFINT_FW_CTL_ITR_INDX_M) | 1191 PFINT_FW_CTL_CAUSE_ENA_M); 1192 #else 1193 ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M); 1194 #endif 1195 1196 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), 1197 GLINT_DYN_CTL_INTENA_M | 1198 GLINT_DYN_CTL_CLEARPBA_M | 1199 GLINT_DYN_CTL_ITR_INDX_M); 1200 1201 ice_flush(hw); 1202 } 1203 1204 /* Disable IRQ0 */ 1205 static void 1206 ice_pf_disable_irq0(struct ice_hw *hw) 1207 { 1208 /* Disable all interrupt types */ 1209 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M); 1210 ice_flush(hw); 1211 } 1212 1213 #ifdef ICE_LSE_SPT 1214 static void 1215 ice_handle_aq_msg(struct rte_eth_dev *dev) 1216 { 1217 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1218 struct ice_ctl_q_info *cq = &hw->adminq; 1219 struct ice_rq_event_info event; 1220 uint16_t pending, opcode; 1221 int ret; 1222 1223 event.buf_len = ICE_AQ_MAX_BUF_LEN; 1224 event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0); 1225 if (!event.msg_buf) { 1226 PMD_DRV_LOG(ERR, "Failed to allocate mem"); 1227 return; 1228 } 1229 1230 pending = 1; 1231 while (pending) { 1232 ret = ice_clean_rq_elem(hw, cq, &event, &pending); 1233 1234 if (ret != ICE_SUCCESS) { 1235 PMD_DRV_LOG(INFO, 1236 "Failed to read msg from AdminQ, " 1237 "adminq_err: %u", 1238 hw->adminq.sq_last_status); 1239 break; 1240 } 1241 opcode = rte_le_to_cpu_16(event.desc.opcode); 1242 1243 switch (opcode) { 1244 case ice_aqc_opc_get_link_status: 1245 ret = ice_link_update(dev, 0); 1246 if (!ret) 1247 rte_eth_dev_callback_process 1248 (dev, RTE_ETH_EVENT_INTR_LSC, NULL); 1249 break; 1250 default: 1251 PMD_DRV_LOG(DEBUG, "Request %u is not supported yet", 1252 opcode); 1253 break; 1254 } 1255 } 1256 rte_free(event.msg_buf); 1257 } 1258 #endif 1259 1260 /** 1261 * Interrupt handler triggered by NIC for handling 1262 * specific interrupt. 1263 * 1264 * @param handle 1265 * Pointer to interrupt handle. 1266 * @param param 1267 * The address of parameter (struct rte_eth_dev *) regsitered before. 
1268 * 1269 * @return 1270 * void 1271 */ 1272 static void 1273 ice_interrupt_handler(void *param) 1274 { 1275 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 1276 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1277 uint32_t oicr; 1278 uint32_t reg; 1279 uint8_t pf_num; 1280 uint8_t event; 1281 uint16_t queue; 1282 int ret; 1283 #ifdef ICE_LSE_SPT 1284 uint32_t int_fw_ctl; 1285 #endif 1286 1287 /* Disable interrupt */ 1288 ice_pf_disable_irq0(hw); 1289 1290 /* read out interrupt causes */ 1291 oicr = ICE_READ_REG(hw, PFINT_OICR); 1292 #ifdef ICE_LSE_SPT 1293 int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL); 1294 #endif 1295 1296 /* No interrupt event indicated */ 1297 if (!(oicr & PFINT_OICR_INTEVENT_M)) { 1298 PMD_DRV_LOG(INFO, "No interrupt event"); 1299 goto done; 1300 } 1301 1302 #ifdef ICE_LSE_SPT 1303 if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) { 1304 PMD_DRV_LOG(INFO, "FW_CTL: link state change event"); 1305 ice_handle_aq_msg(dev); 1306 } 1307 #else 1308 if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) { 1309 PMD_DRV_LOG(INFO, "OICR: link state change event"); 1310 ret = ice_link_update(dev, 0); 1311 if (!ret) 1312 rte_eth_dev_callback_process 1313 (dev, RTE_ETH_EVENT_INTR_LSC, NULL); 1314 } 1315 #endif 1316 1317 if (oicr & PFINT_OICR_MAL_DETECT_M) { 1318 PMD_DRV_LOG(WARNING, "OICR: MDD event"); 1319 reg = ICE_READ_REG(hw, GL_MDET_TX_PQM); 1320 if (reg & GL_MDET_TX_PQM_VALID_M) { 1321 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> 1322 GL_MDET_TX_PQM_PF_NUM_S; 1323 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> 1324 GL_MDET_TX_PQM_MAL_TYPE_S; 1325 queue = (reg & GL_MDET_TX_PQM_QNUM_M) >> 1326 GL_MDET_TX_PQM_QNUM_S; 1327 1328 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event " 1329 "%d by PQM on TX queue %d PF# %d", 1330 event, queue, pf_num); 1331 } 1332 1333 reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN); 1334 if (reg & GL_MDET_TX_TCLAN_VALID_M) { 1335 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> 1336 GL_MDET_TX_TCLAN_PF_NUM_S; 1337 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> 1338 GL_MDET_TX_TCLAN_MAL_TYPE_S; 1339 queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >> 1340 GL_MDET_TX_TCLAN_QNUM_S; 1341 1342 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event " 1343 "%d by TCLAN on TX queue %d PF# %d", 1344 event, queue, pf_num); 1345 } 1346 } 1347 done: 1348 /* Enable interrupt */ 1349 ice_pf_enable_irq0(hw); 1350 rte_intr_ack(dev->intr_handle); 1351 } 1352 1353 static void 1354 ice_init_proto_xtr(struct rte_eth_dev *dev) 1355 { 1356 struct ice_adapter *ad = 1357 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 1358 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1359 struct ice_hw *hw = ICE_PF_TO_HW(pf); 1360 const struct proto_xtr_ol_flag *ol_flag; 1361 bool proto_xtr_enable = false; 1362 int offset; 1363 uint16_t i; 1364 1365 pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0); 1366 if (unlikely(pf->proto_xtr == NULL)) { 1367 PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table"); 1368 return; 1369 } 1370 1371 for (i = 0; i < pf->lan_nb_qps; i++) { 1372 pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ? 
1373 ad->devargs.proto_xtr[i] : 1374 ad->devargs.proto_xtr_dflt; 1375 1376 if (pf->proto_xtr[i] != PROTO_XTR_NONE) { 1377 uint8_t type = pf->proto_xtr[i]; 1378 1379 ice_proto_xtr_ol_flag_params[type].required = true; 1380 proto_xtr_enable = true; 1381 } 1382 } 1383 1384 if (likely(!proto_xtr_enable)) 1385 return; 1386 1387 ice_check_proto_xtr_support(hw); 1388 1389 offset = rte_mbuf_dynfield_register(&ice_proto_xtr_metadata_param); 1390 if (unlikely(offset == -1)) { 1391 PMD_DRV_LOG(ERR, 1392 "Protocol extraction metadata is disabled in mbuf with error %d", 1393 -rte_errno); 1394 return; 1395 } 1396 1397 PMD_DRV_LOG(DEBUG, 1398 "Protocol extraction metadata offset in mbuf is : %d", 1399 offset); 1400 rte_net_ice_dynfield_proto_xtr_metadata_offs = offset; 1401 1402 for (i = 0; i < RTE_DIM(ice_proto_xtr_ol_flag_params); i++) { 1403 ol_flag = &ice_proto_xtr_ol_flag_params[i]; 1404 1405 if (!ol_flag->required) 1406 continue; 1407 1408 if (!ice_proto_xtr_hw_support[i]) { 1409 PMD_DRV_LOG(ERR, 1410 "Protocol extraction type %u is not supported in hardware", 1411 i); 1412 rte_net_ice_dynfield_proto_xtr_metadata_offs = -1; 1413 break; 1414 } 1415 1416 offset = rte_mbuf_dynflag_register(&ol_flag->param); 1417 if (unlikely(offset == -1)) { 1418 PMD_DRV_LOG(ERR, 1419 "Protocol extraction offload '%s' failed to register with error %d", 1420 ol_flag->param.name, -rte_errno); 1421 1422 rte_net_ice_dynfield_proto_xtr_metadata_offs = -1; 1423 break; 1424 } 1425 1426 PMD_DRV_LOG(DEBUG, 1427 "Protocol extraction offload '%s' offset in mbuf is : %d", 1428 ol_flag->param.name, offset); 1429 *ol_flag->ol_flag = 1ULL << offset; 1430 } 1431 } 1432 1433 /* Initialize SW parameters of PF */ 1434 static int 1435 ice_pf_sw_init(struct rte_eth_dev *dev) 1436 { 1437 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1438 struct ice_hw *hw = ICE_PF_TO_HW(pf); 1439 1440 pf->lan_nb_qp_max = 1441 (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq, 1442 hw->func_caps.common_cap.num_rxq); 1443 1444 pf->lan_nb_qps = pf->lan_nb_qp_max; 1445 1446 ice_init_proto_xtr(dev); 1447 1448 if (hw->func_caps.fd_fltr_guar > 0 || 1449 hw->func_caps.fd_fltr_best_effort > 0) { 1450 pf->flags |= ICE_FLAG_FDIR; 1451 pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR; 1452 pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps; 1453 } else { 1454 pf->fdir_nb_qps = 0; 1455 } 1456 pf->fdir_qp_offset = 0; 1457 1458 return 0; 1459 } 1460 1461 struct ice_vsi * 1462 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type) 1463 { 1464 struct ice_hw *hw = ICE_PF_TO_HW(pf); 1465 struct ice_vsi *vsi = NULL; 1466 struct ice_vsi_ctx vsi_ctx; 1467 int ret; 1468 struct rte_ether_addr broadcast = { 1469 .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} }; 1470 struct rte_ether_addr mac_addr; 1471 uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 1472 uint8_t tc_bitmap = 0x1; 1473 uint16_t cfg; 1474 1475 /* hw->num_lports = 1 in NIC mode */ 1476 vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0); 1477 if (!vsi) 1478 return NULL; 1479 1480 vsi->idx = pf->next_vsi_idx; 1481 pf->next_vsi_idx++; 1482 vsi->type = type; 1483 vsi->adapter = ICE_PF_TO_ADAPTER(pf); 1484 vsi->max_macaddrs = ICE_NUM_MACADDR_MAX; 1485 vsi->vlan_anti_spoof_on = 0; 1486 vsi->vlan_filter_on = 1; 1487 TAILQ_INIT(&vsi->mac_list); 1488 TAILQ_INIT(&vsi->vlan_list); 1489 1490 /* Be sync with RTE_ETH_RSS_RETA_SIZE_x maximum value definition */ 1491 pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size > 1492 RTE_ETH_RSS_RETA_SIZE_512 ? 
RTE_ETH_RSS_RETA_SIZE_512 : 1493 hw->func_caps.common_cap.rss_table_size; 1494 pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE; 1495 1496 memset(&vsi_ctx, 0, sizeof(vsi_ctx)); 1497 switch (type) { 1498 case ICE_VSI_PF: 1499 vsi->nb_qps = pf->lan_nb_qps; 1500 vsi->base_queue = 1; 1501 ice_vsi_config_default_rss(&vsi_ctx.info); 1502 vsi_ctx.alloc_from_pool = true; 1503 vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF; 1504 /* switch_id is queried by get_switch_config aq, which is done 1505 * by ice_init_hw 1506 */ 1507 vsi_ctx.info.sw_id = hw->port_info->sw_id; 1508 vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; 1509 /* Allow all untagged or tagged packets */ 1510 vsi_ctx.info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL; 1511 vsi_ctx.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING; 1512 vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF | 1513 ICE_AQ_VSI_Q_OPT_RSS_TPLZ; 1514 if (ice_is_dvm_ena(hw)) { 1515 vsi_ctx.info.outer_vlan_flags = 1516 (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL << 1517 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) & 1518 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M; 1519 vsi_ctx.info.outer_vlan_flags |= 1520 (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 << 1521 ICE_AQ_VSI_OUTER_TAG_TYPE_S) & 1522 ICE_AQ_VSI_OUTER_TAG_TYPE_M; 1523 } 1524 1525 /* FDIR */ 1526 cfg = ICE_AQ_VSI_PROP_SECURITY_VALID | 1527 ICE_AQ_VSI_PROP_FLOW_DIR_VALID; 1528 vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg); 1529 cfg = ICE_AQ_VSI_FD_ENABLE; 1530 vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg); 1531 vsi_ctx.info.max_fd_fltr_dedicated = 1532 rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar); 1533 vsi_ctx.info.max_fd_fltr_shared = 1534 rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort); 1535 1536 /* Enable VLAN/UP trip */ 1537 ret = ice_vsi_config_tc_queue_mapping(vsi, 1538 &vsi_ctx.info, 1539 ICE_DEFAULT_TCMAP); 1540 if (ret) { 1541 PMD_INIT_LOG(ERR, 1542 "tc queue mapping with vsi failed, " 1543 "err = %d", 1544 ret); 1545 goto fail_mem; 1546 } 1547 1548 break; 1549 case ICE_VSI_CTRL: 1550 vsi->nb_qps = pf->fdir_nb_qps; 1551 vsi->base_queue = ICE_FDIR_QUEUE_ID; 1552 vsi_ctx.alloc_from_pool = true; 1553 vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF; 1554 1555 cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID; 1556 vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg); 1557 cfg = ICE_AQ_VSI_FD_PROG_ENABLE; 1558 vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg); 1559 vsi_ctx.info.sw_id = hw->port_info->sw_id; 1560 vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; 1561 ret = ice_vsi_config_tc_queue_mapping(vsi, 1562 &vsi_ctx.info, 1563 ICE_DEFAULT_TCMAP); 1564 if (ret) { 1565 PMD_INIT_LOG(ERR, 1566 "tc queue mapping with vsi failed, " 1567 "err = %d", 1568 ret); 1569 goto fail_mem; 1570 } 1571 break; 1572 default: 1573 /* for other types of VSI */ 1574 PMD_INIT_LOG(ERR, "other types of VSI not supported"); 1575 goto fail_mem; 1576 } 1577 1578 /* VF has MSIX interrupt in VF range, don't allocate here */ 1579 if (type == ICE_VSI_PF) { 1580 ret = ice_res_pool_alloc(&pf->msix_pool, 1581 RTE_MIN(vsi->nb_qps, 1582 RTE_MAX_RXTX_INTR_VEC_ID)); 1583 if (ret < 0) { 1584 PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d", 1585 vsi->vsi_id, ret); 1586 } 1587 vsi->msix_intr = ret; 1588 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID); 1589 } else if (type == ICE_VSI_CTRL) { 1590 ret = ice_res_pool_alloc(&pf->msix_pool, 1); 1591 if (ret < 0) { 1592 PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", 1593 vsi->vsi_id, ret); 1594 } 1595 vsi->msix_intr = ret; 1596 vsi->nb_msix = 1; 1597 } else { 1598 vsi->msix_intr = 0; 1599 vsi->nb_msix = 0; 1600 } 1601 ret = 
ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL); 1602 if (ret != ICE_SUCCESS) { 1603 PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret); 1604 goto fail_mem; 1605 } 1606 /* store vsi information is SW structure */ 1607 vsi->vsi_id = vsi_ctx.vsi_num; 1608 vsi->info = vsi_ctx.info; 1609 pf->vsis_allocated = vsi_ctx.vsis_allocd; 1610 pf->vsis_unallocated = vsi_ctx.vsis_unallocated; 1611 1612 if (type == ICE_VSI_PF) { 1613 /* MAC configuration */ 1614 rte_ether_addr_copy((struct rte_ether_addr *) 1615 hw->port_info->mac.perm_addr, 1616 &pf->dev_addr); 1617 1618 rte_ether_addr_copy(&pf->dev_addr, &mac_addr); 1619 ret = ice_add_mac_filter(vsi, &mac_addr); 1620 if (ret != ICE_SUCCESS) 1621 PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter"); 1622 1623 rte_ether_addr_copy(&broadcast, &mac_addr); 1624 ret = ice_add_mac_filter(vsi, &mac_addr); 1625 if (ret != ICE_SUCCESS) 1626 PMD_INIT_LOG(ERR, "Failed to add MAC filter"); 1627 } 1628 1629 /* At the beginning, only TC0. */ 1630 /* What we need here is the maximam number of the TX queues. 1631 * Currently vsi->nb_qps means it. 1632 * Correct it if any change. 1633 */ 1634 max_txqs[0] = vsi->nb_qps; 1635 ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx, 1636 tc_bitmap, max_txqs); 1637 if (ret != ICE_SUCCESS) 1638 PMD_INIT_LOG(ERR, "Failed to config vsi sched"); 1639 1640 return vsi; 1641 fail_mem: 1642 rte_free(vsi); 1643 pf->next_vsi_idx--; 1644 return NULL; 1645 } 1646 1647 static int 1648 ice_send_driver_ver(struct ice_hw *hw) 1649 { 1650 struct ice_driver_ver dv; 1651 1652 /* we don't have driver version use 0 for dummy */ 1653 dv.major_ver = 0; 1654 dv.minor_ver = 0; 1655 dv.build_ver = 0; 1656 dv.subbuild_ver = 0; 1657 strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string)); 1658 1659 return ice_aq_send_driver_ver(hw, &dv, NULL); 1660 } 1661 1662 static int 1663 ice_pf_setup(struct ice_pf *pf) 1664 { 1665 struct ice_hw *hw = ICE_PF_TO_HW(pf); 1666 struct ice_vsi *vsi; 1667 uint16_t unused; 1668 1669 /* Clear all stats counters */ 1670 pf->offset_loaded = false; 1671 memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats)); 1672 memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats)); 1673 memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats)); 1674 memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats)); 1675 1676 /* force guaranteed filter pool for PF */ 1677 ice_alloc_fd_guar_item(hw, &unused, 1678 hw->func_caps.fd_fltr_guar); 1679 /* force shared filter pool for PF */ 1680 ice_alloc_fd_shrd_item(hw, &unused, 1681 hw->func_caps.fd_fltr_best_effort); 1682 1683 vsi = ice_setup_vsi(pf, ICE_VSI_PF); 1684 if (!vsi) { 1685 PMD_INIT_LOG(ERR, "Failed to add vsi for PF"); 1686 return -EINVAL; 1687 } 1688 1689 pf->main_vsi = vsi; 1690 1691 return 0; 1692 } 1693 1694 static enum ice_pkg_type 1695 ice_load_pkg_type(struct ice_hw *hw) 1696 { 1697 enum ice_pkg_type package_type; 1698 1699 /* store the activated package type (OS default or Comms) */ 1700 if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME, 1701 ICE_PKG_NAME_SIZE)) 1702 package_type = ICE_PKG_TYPE_OS_DEFAULT; 1703 else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME, 1704 ICE_PKG_NAME_SIZE)) 1705 package_type = ICE_PKG_TYPE_COMMS; 1706 else 1707 package_type = ICE_PKG_TYPE_UNKNOWN; 1708 1709 PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s (%s VLAN mode)", 1710 hw->active_pkg_ver.major, hw->active_pkg_ver.minor, 1711 hw->active_pkg_ver.update, hw->active_pkg_ver.draft, 1712 hw->active_pkg_name, 1713 ice_is_dvm_ena(hw) ? 
"double" : "single"); 1714 1715 return package_type; 1716 } 1717 1718 int ice_load_pkg(struct ice_adapter *adapter, bool use_dsn, uint64_t dsn) 1719 { 1720 struct ice_hw *hw = &adapter->hw; 1721 char pkg_file[ICE_MAX_PKG_FILENAME_SIZE]; 1722 char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE]; 1723 void *buf; 1724 size_t bufsz; 1725 int err; 1726 1727 if (!use_dsn) 1728 goto no_dsn; 1729 1730 memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE); 1731 snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE, 1732 "ice-%016" PRIx64 ".pkg", dsn); 1733 strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES, 1734 ICE_MAX_PKG_FILENAME_SIZE); 1735 strcat(pkg_file, opt_ddp_filename); 1736 if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0) 1737 goto load_fw; 1738 1739 strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT, 1740 ICE_MAX_PKG_FILENAME_SIZE); 1741 strcat(pkg_file, opt_ddp_filename); 1742 if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0) 1743 goto load_fw; 1744 1745 no_dsn: 1746 strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE); 1747 if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0) 1748 goto load_fw; 1749 1750 strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE); 1751 if (rte_firmware_read(pkg_file, &buf, &bufsz) < 0) { 1752 PMD_INIT_LOG(ERR, "failed to search file path\n"); 1753 return -1; 1754 } 1755 1756 load_fw: 1757 PMD_INIT_LOG(DEBUG, "DDP package name: %s", pkg_file); 1758 1759 err = ice_copy_and_init_pkg(hw, buf, bufsz); 1760 if (err) { 1761 PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d\n", err); 1762 goto out; 1763 } 1764 1765 /* store the loaded pkg type info */ 1766 adapter->active_pkg_type = ice_load_pkg_type(hw); 1767 1768 out: 1769 free(buf); 1770 return err; 1771 } 1772 1773 static void 1774 ice_base_queue_get(struct ice_pf *pf) 1775 { 1776 uint32_t reg; 1777 struct ice_hw *hw = ICE_PF_TO_HW(pf); 1778 1779 reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC); 1780 if (reg & PFLAN_RX_QALLOC_VALID_M) { 1781 pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M; 1782 } else { 1783 PMD_INIT_LOG(WARNING, "Failed to get Rx base queue" 1784 " index"); 1785 } 1786 } 1787 1788 static int 1789 parse_bool(const char *key, const char *value, void *args) 1790 { 1791 int *i = (int *)args; 1792 char *end; 1793 int num; 1794 1795 num = strtoul(value, &end, 10); 1796 1797 if (num != 0 && num != 1) { 1798 PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", " 1799 "value must be 0 or 1", 1800 value, key); 1801 return -1; 1802 } 1803 1804 *i = num; 1805 return 0; 1806 } 1807 1808 static int 1809 parse_u64(const char *key, const char *value, void *args) 1810 { 1811 u64 *num = (u64 *)args; 1812 u64 tmp; 1813 1814 errno = 0; 1815 tmp = strtoull(value, NULL, 16); 1816 if (errno) { 1817 PMD_DRV_LOG(WARNING, "%s: \"%s\" is not a valid u64", 1818 key, value); 1819 return -1; 1820 } 1821 1822 *num = tmp; 1823 1824 return 0; 1825 } 1826 1827 static int 1828 lookup_pps_type(const char *pps_name) 1829 { 1830 static struct { 1831 const char *name; 1832 enum pps_type type; 1833 } pps_type_map[] = { 1834 { "pin", PPS_PIN }, 1835 }; 1836 1837 uint32_t i; 1838 1839 for (i = 0; i < RTE_DIM(pps_type_map); i++) { 1840 if (strcmp(pps_name, pps_type_map[i].name) == 0) 1841 return pps_type_map[i].type; 1842 } 1843 1844 return -1; 1845 } 1846 1847 static int 1848 parse_pin_set(const char *input, int pps_type, struct ice_devargs *devargs) 1849 { 1850 const char *str = input; 1851 char *end = NULL; 1852 uint32_t idx; 1853 1854 while (isblank(*str)) 1855 str++; 1856 1857 if (!isdigit(*str)) 1858 
return -1; 1859 1860 if (pps_type == PPS_PIN) { 1861 idx = strtoul(str, &end, 10); 1862 if (end == NULL || idx >= ICE_MAX_PIN_NUM) 1863 return -1; 1864 while (isblank(*end)) 1865 end++; 1866 if (*end != ']') 1867 return -1; 1868 1869 devargs->pin_idx = idx; 1870 devargs->pps_out_ena = 1; 1871 1872 return 0; 1873 } 1874 1875 return -1; 1876 } 1877 1878 static int 1879 parse_pps_out_parameter(const char *pins, struct ice_devargs *devargs) 1880 { 1881 const char *pin_start; 1882 uint32_t idx; 1883 int pps_type; 1884 char pps_name[32]; 1885 1886 while (isblank(*pins)) 1887 pins++; 1888 1889 pins++; 1890 while (isblank(*pins)) 1891 pins++; 1892 if (*pins == '\0') 1893 return -1; 1894 1895 for (idx = 0; ; idx++) { 1896 if (isblank(pins[idx]) || 1897 pins[idx] == ':' || 1898 pins[idx] == '\0') 1899 break; 1900 1901 pps_name[idx] = pins[idx]; 1902 } 1903 pps_name[idx] = '\0'; 1904 pps_type = lookup_pps_type(pps_name); 1905 if (pps_type < 0) 1906 return -1; 1907 1908 pins += idx; 1909 1910 pins += strcspn(pins, ":"); 1911 if (*pins++ != ':') 1912 return -1; 1913 while (isblank(*pins)) 1914 pins++; 1915 1916 pin_start = pins; 1917 1918 while (isblank(*pins)) 1919 pins++; 1920 1921 if (parse_pin_set(pin_start, pps_type, devargs) < 0) 1922 return -1; 1923 1924 return 0; 1925 } 1926 1927 static int 1928 handle_pps_out_arg(__rte_unused const char *key, const char *value, 1929 void *extra_args) 1930 { 1931 struct ice_devargs *devargs = extra_args; 1932 1933 if (value == NULL || extra_args == NULL) 1934 return -EINVAL; 1935 1936 if (parse_pps_out_parameter(value, devargs) < 0) { 1937 PMD_DRV_LOG(ERR, 1938 "The GPIO pin parameter is wrong : '%s'", 1939 value); 1940 return -1; 1941 } 1942 1943 return 0; 1944 } 1945 1946 static int ice_parse_devargs(struct rte_eth_dev *dev) 1947 { 1948 struct ice_adapter *ad = 1949 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 1950 struct rte_devargs *devargs = dev->device->devargs; 1951 struct rte_kvargs *kvlist; 1952 int ret; 1953 1954 if (devargs == NULL) 1955 return 0; 1956 1957 kvlist = rte_kvargs_parse(devargs->args, ice_valid_args); 1958 if (kvlist == NULL) { 1959 PMD_INIT_LOG(ERR, "Invalid kvargs key\n"); 1960 return -EINVAL; 1961 } 1962 1963 ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE; 1964 memset(ad->devargs.proto_xtr, PROTO_XTR_NONE, 1965 sizeof(ad->devargs.proto_xtr)); 1966 1967 ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG, 1968 &handle_proto_xtr_arg, &ad->devargs); 1969 if (ret) 1970 goto bail; 1971 1972 ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG, 1973 &parse_bool, &ad->devargs.safe_mode_support); 1974 if (ret) 1975 goto bail; 1976 1977 ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG, 1978 &parse_bool, &ad->devargs.pipe_mode_support); 1979 if (ret) 1980 goto bail; 1981 1982 ret = rte_kvargs_process(kvlist, ICE_HW_DEBUG_MASK_ARG, 1983 &parse_u64, &ad->hw.debug_mask); 1984 if (ret) 1985 goto bail; 1986 1987 ret = rte_kvargs_process(kvlist, ICE_ONE_PPS_OUT_ARG, 1988 &handle_pps_out_arg, &ad->devargs); 1989 if (ret) 1990 goto bail; 1991 1992 ret = rte_kvargs_process(kvlist, ICE_RX_LOW_LATENCY_ARG, 1993 &parse_bool, &ad->devargs.rx_low_latency); 1994 1995 bail: 1996 rte_kvargs_free(kvlist); 1997 return ret; 1998 } 1999 2000 /* Forward LLDP packets to default VSI by set switch rules */ 2001 static int 2002 ice_vsi_config_sw_lldp(struct ice_vsi *vsi, bool on) 2003 { 2004 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 2005 struct ice_fltr_list_entry *s_list_itr = NULL; 2006 struct LIST_HEAD_TYPE list_head; 2007 int ret = 0; 2008 2009 
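/*
 * Sketch of the rule built below: a single ethertype switch filter matching
 * RTE_ETHER_TYPE_LLDP (0x88CC) on the receive side, with the hit forwarded
 * to this VSI. When 'on' is true the rule is installed via ice_add_eth_mac();
 * otherwise the same entry is used to remove it via ice_remove_eth_mac().
 */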
INIT_LIST_HEAD(&list_head);
2010 
2011 	s_list_itr = (struct ice_fltr_list_entry *)
2012 			ice_malloc(hw, sizeof(*s_list_itr));
2013 	if (!s_list_itr)
2014 		return -ENOMEM;
2015 	s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
2016 	s_list_itr->fltr_info.vsi_handle = vsi->idx;
2017 	s_list_itr->fltr_info.l_data.ethertype_mac.ethertype =
2018 			RTE_ETHER_TYPE_LLDP;
2019 	s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
2020 	s_list_itr->fltr_info.flag = ICE_FLTR_RX;
2021 	s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT;
2022 	LIST_ADD(&s_list_itr->list_entry, &list_head);
2023 	if (on)
2024 		ret = ice_add_eth_mac(hw, &list_head);
2025 	else
2026 		ret = ice_remove_eth_mac(hw, &list_head);
2027 
2028 	rte_free(s_list_itr);
2029 	return ret;
2030 }
2031 
2032 static enum ice_status
2033 ice_get_hw_res(struct ice_hw *hw, uint16_t res_type,
2034 	       uint16_t num, uint16_t desc_id,
2035 	       uint16_t *prof_buf, uint16_t *num_prof)
2036 {
2037 	struct ice_aqc_res_elem *resp_buf;
2038 	int ret;
2039 	uint16_t buf_len;
2040 	bool res_shared = true;
2041 	struct ice_aq_desc aq_desc;
2042 	struct ice_sq_cd *cd = NULL;
2043 	struct ice_aqc_get_allocd_res_desc *cmd =
2044 			&aq_desc.params.get_res_desc;
2045 
2046 	buf_len = sizeof(*resp_buf) * num;
2047 	resp_buf = ice_malloc(hw, buf_len);
2048 	if (!resp_buf)
2049 		return -ENOMEM;
2050 
2051 	ice_fill_dflt_direct_cmd_desc(&aq_desc,
2052 				      ice_aqc_opc_get_allocd_res_desc);
2053 
2054 	cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2055 					ICE_AQC_RES_TYPE_M) | (res_shared ?
2056 					ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2057 	cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id);
2058 
2059 	ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd);
2060 	if (!ret)
2061 		*num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc);
2062 	else
2063 		goto exit;
2064 
2065 	ice_memcpy(prof_buf, resp_buf, sizeof(*resp_buf) *
2066 			(*num_prof), ICE_NONDMA_TO_NONDMA);
2067 
2068 exit:
2069 	rte_free(resp_buf);
2070 	return ret;
2071 }
2072 static int
2073 ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type)
2074 {
2075 	int ret;
2076 	uint16_t prof_id;
2077 	uint16_t prof_buf[ICE_MAX_RES_DESC_NUM];
2078 	uint16_t first_desc = 1;
2079 	uint16_t num_prof = 0;
2080 
2081 	ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM,
2082 			     first_desc, prof_buf, &num_prof);
2083 	if (ret) {
2084 		PMD_INIT_LOG(ERR, "Failed to get fxp resource");
2085 		return ret;
2086 	}
2087 
2088 	for (prof_id = 0; prof_id < num_prof; prof_id++) {
2089 		ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]);
2090 		if (ret) {
2091 			PMD_INIT_LOG(ERR, "Failed to free fxp resource");
2092 			return ret;
2093 		}
2094 	}
2095 	return 0;
2096 }
2097 
2098 static int
2099 ice_reset_fxp_resource(struct ice_hw *hw)
2100 {
2101 	int ret;
2102 
2103 	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID);
2104 	if (ret) {
2105 		PMD_INIT_LOG(ERR, "Failed to clean up fdir resource");
2106 		return ret;
2107 	}
2108 
2109 	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID);
2110 	if (ret) {
2111 		PMD_INIT_LOG(ERR, "Failed to clean up rss resource");
2112 		return ret;
2113 	}
2114 
2115 	return 0;
2116 }
2117 
2118 static void
2119 ice_rss_ctx_init(struct ice_pf *pf)
2120 {
2121 	memset(&pf->hash_ctx, 0, sizeof(pf->hash_ctx));
2122 }
2123 
2124 static uint64_t
2125 ice_get_supported_rxdid(struct ice_hw *hw)
2126 {
2127 	uint64_t supported_rxdid = 0; /* bitmap for supported RXDID */
2128 	uint32_t regval;
2129 	int i;
2130 
2131 	supported_rxdid |= BIT(ICE_RXDID_LEGACY_1);
2132 
2133 	for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
2134 		regval =
ICE_READ_REG(hw, GLFLXP_RXDID_FLAGS(i, 0)); 2135 if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) 2136 & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) 2137 supported_rxdid |= BIT(i); 2138 } 2139 return supported_rxdid; 2140 } 2141 2142 static int 2143 ice_dev_init(struct rte_eth_dev *dev) 2144 { 2145 struct rte_pci_device *pci_dev; 2146 struct rte_intr_handle *intr_handle; 2147 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2148 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 2149 struct ice_adapter *ad = 2150 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 2151 struct ice_vsi *vsi; 2152 int ret; 2153 #ifndef RTE_EXEC_ENV_WINDOWS 2154 off_t pos; 2155 uint32_t dsn_low, dsn_high; 2156 uint64_t dsn; 2157 bool use_dsn; 2158 #endif 2159 2160 dev->dev_ops = &ice_eth_dev_ops; 2161 dev->rx_queue_count = ice_rx_queue_count; 2162 dev->rx_descriptor_status = ice_rx_descriptor_status; 2163 dev->tx_descriptor_status = ice_tx_descriptor_status; 2164 dev->rx_pkt_burst = ice_recv_pkts; 2165 dev->tx_pkt_burst = ice_xmit_pkts; 2166 dev->tx_pkt_prepare = ice_prep_pkts; 2167 2168 /* for secondary processes, we don't initialise any further as primary 2169 * has already done this work. 2170 */ 2171 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2172 ice_set_rx_function(dev); 2173 ice_set_tx_function(dev); 2174 return 0; 2175 } 2176 2177 dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 2178 2179 ice_set_default_ptype_table(dev); 2180 pci_dev = RTE_DEV_TO_PCI(dev->device); 2181 intr_handle = pci_dev->intr_handle; 2182 2183 pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 2184 pf->dev_data = dev->data; 2185 hw->back = pf->adapter; 2186 hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr; 2187 hw->vendor_id = pci_dev->id.vendor_id; 2188 hw->device_id = pci_dev->id.device_id; 2189 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; 2190 hw->subsystem_device_id = pci_dev->id.subsystem_device_id; 2191 hw->bus.device = pci_dev->addr.devid; 2192 hw->bus.func = pci_dev->addr.function; 2193 2194 ret = ice_parse_devargs(dev); 2195 if (ret) { 2196 PMD_INIT_LOG(ERR, "Failed to parse devargs"); 2197 return -EINVAL; 2198 } 2199 2200 ice_init_controlq_parameter(hw); 2201 2202 ret = ice_init_hw(hw); 2203 if (ret) { 2204 PMD_INIT_LOG(ERR, "Failed to initialize HW"); 2205 return -EINVAL; 2206 } 2207 2208 #ifndef RTE_EXEC_ENV_WINDOWS 2209 use_dsn = false; 2210 dsn = 0; 2211 pos = rte_pci_find_ext_capability(pci_dev, RTE_PCI_EXT_CAP_ID_DSN); 2212 if (pos) { 2213 if (rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4) < 0 || 2214 rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8) < 0) { 2215 PMD_INIT_LOG(ERR, "Failed to read pci config space\n"); 2216 } else { 2217 use_dsn = true; 2218 dsn = (uint64_t)dsn_high << 32 | dsn_low; 2219 } 2220 } else { 2221 PMD_INIT_LOG(ERR, "Failed to read device serial number\n"); 2222 } 2223 2224 ret = ice_load_pkg(pf->adapter, use_dsn, dsn); 2225 if (ret == 0) { 2226 ret = ice_init_hw_tbls(hw); 2227 if (ret) { 2228 PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", ret); 2229 rte_free(hw->pkg_copy); 2230 } 2231 } 2232 2233 if (ret) { 2234 if (ad->devargs.safe_mode_support == 0) { 2235 PMD_INIT_LOG(ERR, "Failed to load the DDP package," 2236 "Use safe-mode-support=1 to enter Safe Mode"); 2237 goto err_init_fw; 2238 } 2239 2240 PMD_INIT_LOG(WARNING, "Failed to load the DDP package," 2241 "Entering Safe Mode"); 2242 ad->is_safe_mode = 1; 2243 } 2244 #endif 2245 2246 PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d", 2247 hw->fw_maj_ver, 
hw->fw_min_ver, hw->fw_build, 2248 hw->api_maj_ver, hw->api_min_ver); 2249 2250 ice_pf_sw_init(dev); 2251 ret = ice_init_mac_address(dev); 2252 if (ret) { 2253 PMD_INIT_LOG(ERR, "Failed to initialize mac address"); 2254 goto err_init_mac; 2255 } 2256 2257 ret = ice_res_pool_init(&pf->msix_pool, 1, 2258 hw->func_caps.common_cap.num_msix_vectors - 1); 2259 if (ret) { 2260 PMD_INIT_LOG(ERR, "Failed to init MSIX pool"); 2261 goto err_msix_pool_init; 2262 } 2263 2264 ret = ice_pf_setup(pf); 2265 if (ret) { 2266 PMD_INIT_LOG(ERR, "Failed to setup PF"); 2267 goto err_pf_setup; 2268 } 2269 2270 ret = ice_send_driver_ver(hw); 2271 if (ret) { 2272 PMD_INIT_LOG(ERR, "Failed to send driver version"); 2273 goto err_pf_setup; 2274 } 2275 2276 vsi = pf->main_vsi; 2277 2278 ret = ice_aq_stop_lldp(hw, true, false, NULL); 2279 if (ret != ICE_SUCCESS) 2280 PMD_INIT_LOG(DEBUG, "lldp has already stopped\n"); 2281 ret = ice_init_dcb(hw, true); 2282 if (ret != ICE_SUCCESS) 2283 PMD_INIT_LOG(DEBUG, "Failed to init DCB\n"); 2284 /* Forward LLDP packets to default VSI */ 2285 ret = ice_vsi_config_sw_lldp(vsi, true); 2286 if (ret != ICE_SUCCESS) 2287 PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n"); 2288 /* register callback func to eal lib */ 2289 rte_intr_callback_register(intr_handle, 2290 ice_interrupt_handler, dev); 2291 2292 ice_pf_enable_irq0(hw); 2293 2294 /* enable uio intr after callback register */ 2295 rte_intr_enable(intr_handle); 2296 2297 /* get base queue pairs index in the device */ 2298 ice_base_queue_get(pf); 2299 2300 /* Initialize RSS context for gtpu_eh */ 2301 ice_rss_ctx_init(pf); 2302 2303 if (!ad->is_safe_mode) { 2304 ret = ice_flow_init(ad); 2305 if (ret) { 2306 PMD_INIT_LOG(ERR, "Failed to initialize flow"); 2307 goto err_flow_init; 2308 } 2309 } 2310 2311 ret = ice_reset_fxp_resource(hw); 2312 if (ret) { 2313 PMD_INIT_LOG(ERR, "Failed to reset fxp resource"); 2314 goto err_flow_init; 2315 } 2316 2317 pf->supported_rxdid = ice_get_supported_rxdid(hw); 2318 2319 return 0; 2320 2321 err_flow_init: 2322 ice_flow_uninit(ad); 2323 rte_intr_disable(intr_handle); 2324 ice_pf_disable_irq0(hw); 2325 rte_intr_callback_unregister(intr_handle, 2326 ice_interrupt_handler, dev); 2327 err_pf_setup: 2328 ice_res_pool_destroy(&pf->msix_pool); 2329 err_msix_pool_init: 2330 rte_free(dev->data->mac_addrs); 2331 dev->data->mac_addrs = NULL; 2332 err_init_mac: 2333 rte_free(pf->proto_xtr); 2334 #ifndef RTE_EXEC_ENV_WINDOWS 2335 err_init_fw: 2336 #endif 2337 ice_deinit_hw(hw); 2338 2339 return ret; 2340 } 2341 2342 int 2343 ice_release_vsi(struct ice_vsi *vsi) 2344 { 2345 struct ice_hw *hw; 2346 struct ice_vsi_ctx vsi_ctx; 2347 enum ice_status ret; 2348 int error = 0; 2349 2350 if (!vsi) 2351 return error; 2352 2353 hw = ICE_VSI_TO_HW(vsi); 2354 2355 ice_remove_all_mac_vlan_filters(vsi); 2356 2357 memset(&vsi_ctx, 0, sizeof(vsi_ctx)); 2358 2359 vsi_ctx.vsi_num = vsi->vsi_id; 2360 vsi_ctx.info = vsi->info; 2361 ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL); 2362 if (ret != ICE_SUCCESS) { 2363 PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id); 2364 error = -1; 2365 } 2366 2367 rte_free(vsi->rss_lut); 2368 rte_free(vsi->rss_key); 2369 rte_free(vsi); 2370 return error; 2371 } 2372 2373 void 2374 ice_vsi_disable_queues_intr(struct ice_vsi *vsi) 2375 { 2376 struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id]; 2377 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); 2378 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 2379 struct ice_hw *hw = 
ICE_VSI_TO_HW(vsi);
2380 	uint16_t msix_intr, i;
2381 
2382 	/* disable interrupt and also clear all the existing config */
2383 	for (i = 0; i < vsi->nb_qps; i++) {
2384 		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
2385 		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
2386 		rte_wmb();
2387 	}
2388 
2389 	if (rte_intr_allow_others(intr_handle))
2390 		/* vfio-pci */
2391 		for (i = 0; i < vsi->nb_msix; i++) {
2392 			msix_intr = vsi->msix_intr + i;
2393 			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
2394 				      GLINT_DYN_CTL_WB_ON_ITR_M);
2395 		}
2396 	else
2397 		/* igb_uio */
2398 		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
2399 }
2400 
2401 static int
2402 ice_dev_stop(struct rte_eth_dev *dev)
2403 {
2404 	struct rte_eth_dev_data *data = dev->data;
2405 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2406 	struct ice_vsi *main_vsi = pf->main_vsi;
2407 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2408 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2409 	uint16_t i;
2410 
2411 	/* avoid stopping again */
2412 	if (pf->adapter_stopped)
2413 		return 0;
2414 
2415 	/* stop and clear all Rx queues */
2416 	for (i = 0; i < data->nb_rx_queues; i++)
2417 		ice_rx_queue_stop(dev, i);
2418 
2419 	/* stop and clear all Tx queues */
2420 	for (i = 0; i < data->nb_tx_queues; i++)
2421 		ice_tx_queue_stop(dev, i);
2422 
2423 	/* disable all queue interrupts */
2424 	ice_vsi_disable_queues_intr(main_vsi);
2425 
2426 	if (pf->init_link_up)
2427 		ice_dev_set_link_up(dev);
2428 	else
2429 		ice_dev_set_link_down(dev);
2430 
2431 	/* Clean datapath event and queue/vec mapping */
2432 	rte_intr_efd_disable(intr_handle);
2433 	rte_intr_vec_list_free(intr_handle);
2434 
2435 	pf->adapter_stopped = true;
2436 	dev->data->dev_started = 0;
2437 
2438 	return 0;
2439 }
2440 
2441 static int
2442 ice_dev_close(struct rte_eth_dev *dev)
2443 {
2444 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2445 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2446 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2447 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2448 	struct ice_adapter *ad =
2449 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2450 	int ret;
2451 	uint32_t val;
2452 	uint8_t timer = hw->func_caps.ts_func_info.tmr_index_owned;
2453 	uint32_t pin_idx = ad->devargs.pin_idx;
2454 
2455 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2456 		return 0;
2457 
2458 	/* Stopping the port brings the link down, which triggers a link
2459 	 * event. Disable IRQ0 first so that freeing resources such as
2460 	 * port_info does not race with the interrupt service thread and
2461 	 * crash it.
2462 */ 2463 ice_pf_disable_irq0(hw); 2464 2465 ret = ice_dev_stop(dev); 2466 2467 if (!ad->is_safe_mode) 2468 ice_flow_uninit(ad); 2469 2470 /* release all queue resource */ 2471 ice_free_queues(dev); 2472 2473 ice_res_pool_destroy(&pf->msix_pool); 2474 ice_release_vsi(pf->main_vsi); 2475 ice_sched_cleanup_all(hw); 2476 ice_free_hw_tbls(hw); 2477 rte_free(hw->port_info); 2478 hw->port_info = NULL; 2479 ice_shutdown_all_ctrlq(hw); 2480 rte_free(pf->proto_xtr); 2481 pf->proto_xtr = NULL; 2482 2483 if (ad->devargs.pps_out_ena) { 2484 ICE_WRITE_REG(hw, GLTSYN_AUX_OUT(pin_idx, timer), 0); 2485 ICE_WRITE_REG(hw, GLTSYN_CLKO(pin_idx, timer), 0); 2486 ICE_WRITE_REG(hw, GLTSYN_TGT_L(pin_idx, timer), 0); 2487 ICE_WRITE_REG(hw, GLTSYN_TGT_H(pin_idx, timer), 0); 2488 2489 val = GLGEN_GPIO_CTL_PIN_DIR_M; 2490 ICE_WRITE_REG(hw, GLGEN_GPIO_CTL(pin_idx), val); 2491 } 2492 2493 /* disable uio intr before callback unregister */ 2494 rte_intr_disable(intr_handle); 2495 2496 /* unregister callback func from eal lib */ 2497 rte_intr_callback_unregister(intr_handle, 2498 ice_interrupt_handler, dev); 2499 2500 return ret; 2501 } 2502 2503 static int 2504 ice_dev_uninit(struct rte_eth_dev *dev) 2505 { 2506 ice_dev_close(dev); 2507 2508 return 0; 2509 } 2510 2511 static bool 2512 is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg) 2513 { 2514 return (cfg->hash_flds != 0 && cfg->addl_hdrs != 0) ? true : false; 2515 } 2516 2517 static void 2518 hash_cfg_reset(struct ice_rss_hash_cfg *cfg) 2519 { 2520 cfg->hash_flds = 0; 2521 cfg->addl_hdrs = 0; 2522 cfg->symm = 0; 2523 cfg->hdr_type = ICE_RSS_OUTER_HEADERS; 2524 } 2525 2526 static int 2527 ice_hash_moveout(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg) 2528 { 2529 enum ice_status status = ICE_SUCCESS; 2530 struct ice_hw *hw = ICE_PF_TO_HW(pf); 2531 struct ice_vsi *vsi = pf->main_vsi; 2532 2533 if (!is_hash_cfg_valid(cfg)) 2534 return -ENOENT; 2535 2536 status = ice_rem_rss_cfg(hw, vsi->idx, cfg); 2537 if (status && status != ICE_ERR_DOES_NOT_EXIST) { 2538 PMD_DRV_LOG(ERR, 2539 "ice_rem_rss_cfg failed for VSI:%d, error:%d\n", 2540 vsi->idx, status); 2541 return -EBUSY; 2542 } 2543 2544 return 0; 2545 } 2546 2547 static int 2548 ice_hash_moveback(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg) 2549 { 2550 enum ice_status status = ICE_SUCCESS; 2551 struct ice_hw *hw = ICE_PF_TO_HW(pf); 2552 struct ice_vsi *vsi = pf->main_vsi; 2553 2554 if (!is_hash_cfg_valid(cfg)) 2555 return -ENOENT; 2556 2557 status = ice_add_rss_cfg(hw, vsi->idx, cfg); 2558 if (status) { 2559 PMD_DRV_LOG(ERR, 2560 "ice_add_rss_cfg failed for VSI:%d, error:%d\n", 2561 vsi->idx, status); 2562 return -EBUSY; 2563 } 2564 2565 return 0; 2566 } 2567 2568 static int 2569 ice_hash_remove(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg) 2570 { 2571 int ret; 2572 2573 ret = ice_hash_moveout(pf, cfg); 2574 if (ret && (ret != -ENOENT)) 2575 return ret; 2576 2577 hash_cfg_reset(cfg); 2578 2579 return 0; 2580 } 2581 2582 static int 2583 ice_add_rss_cfg_pre_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx, 2584 u8 ctx_idx) 2585 { 2586 int ret; 2587 2588 switch (ctx_idx) { 2589 case ICE_HASH_GTPU_CTX_EH_IP: 2590 ret = ice_hash_remove(pf, 2591 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); 2592 if (ret && (ret != -ENOENT)) 2593 return ret; 2594 2595 ret = ice_hash_remove(pf, 2596 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); 2597 if (ret && (ret != -ENOENT)) 2598 return ret; 2599 2600 ret = ice_hash_remove(pf, 2601 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]); 2602 if (ret && (ret != -ENOENT)) 2603 return ret; 2604 2605 ret = 
ice_hash_remove(pf, 2606 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]); 2607 if (ret && (ret != -ENOENT)) 2608 return ret; 2609 2610 ret = ice_hash_remove(pf, 2611 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]); 2612 if (ret && (ret != -ENOENT)) 2613 return ret; 2614 2615 ret = ice_hash_remove(pf, 2616 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]); 2617 if (ret && (ret != -ENOENT)) 2618 return ret; 2619 2620 ret = ice_hash_remove(pf, 2621 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]); 2622 if (ret && (ret != -ENOENT)) 2623 return ret; 2624 2625 ret = ice_hash_remove(pf, 2626 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]); 2627 if (ret && (ret != -ENOENT)) 2628 return ret; 2629 2630 break; 2631 case ICE_HASH_GTPU_CTX_EH_IP_UDP: 2632 ret = ice_hash_remove(pf, 2633 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]); 2634 if (ret && (ret != -ENOENT)) 2635 return ret; 2636 2637 ret = ice_hash_remove(pf, 2638 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]); 2639 if (ret && (ret != -ENOENT)) 2640 return ret; 2641 2642 ret = ice_hash_moveout(pf, 2643 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]); 2644 if (ret && (ret != -ENOENT)) 2645 return ret; 2646 2647 ret = ice_hash_moveout(pf, 2648 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]); 2649 if (ret && (ret != -ENOENT)) 2650 return ret; 2651 2652 ret = ice_hash_moveout(pf, 2653 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]); 2654 if (ret && (ret != -ENOENT)) 2655 return ret; 2656 2657 ret = ice_hash_moveout(pf, 2658 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]); 2659 if (ret && (ret != -ENOENT)) 2660 return ret; 2661 2662 break; 2663 case ICE_HASH_GTPU_CTX_EH_IP_TCP: 2664 ret = ice_hash_remove(pf, 2665 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]); 2666 if (ret && (ret != -ENOENT)) 2667 return ret; 2668 2669 ret = ice_hash_remove(pf, 2670 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]); 2671 if (ret && (ret != -ENOENT)) 2672 return ret; 2673 2674 ret = ice_hash_moveout(pf, 2675 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]); 2676 if (ret && (ret != -ENOENT)) 2677 return ret; 2678 2679 ret = ice_hash_moveout(pf, 2680 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]); 2681 if (ret && (ret != -ENOENT)) 2682 return ret; 2683 2684 ret = ice_hash_moveout(pf, 2685 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]); 2686 if (ret && (ret != -ENOENT)) 2687 return ret; 2688 2689 ret = ice_hash_moveout(pf, 2690 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]); 2691 if (ret && (ret != -ENOENT)) 2692 return ret; 2693 2694 break; 2695 case ICE_HASH_GTPU_CTX_UP_IP: 2696 ret = ice_hash_remove(pf, 2697 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]); 2698 if (ret && (ret != -ENOENT)) 2699 return ret; 2700 2701 ret = ice_hash_remove(pf, 2702 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]); 2703 if (ret && (ret != -ENOENT)) 2704 return ret; 2705 2706 ret = ice_hash_moveout(pf, 2707 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]); 2708 if (ret && (ret != -ENOENT)) 2709 return ret; 2710 2711 ret = ice_hash_moveout(pf, 2712 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); 2713 if (ret && (ret != -ENOENT)) 2714 return ret; 2715 2716 ret = ice_hash_moveout(pf, 2717 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); 2718 if (ret && (ret != -ENOENT)) 2719 return ret; 2720 2721 break; 2722 case ICE_HASH_GTPU_CTX_UP_IP_UDP: 2723 case ICE_HASH_GTPU_CTX_UP_IP_TCP: 2724 ret = ice_hash_moveout(pf, 2725 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]); 2726 if (ret && (ret != -ENOENT)) 2727 return ret; 2728 2729 ret = ice_hash_moveout(pf, 2730 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); 2731 if (ret && (ret != -ENOENT)) 2732 return ret; 2733 2734 ret = ice_hash_moveout(pf, 2735 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); 2736 if (ret && (ret != -ENOENT)) 2737 return ret; 2738 2739 break; 
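	/* The downlink (DW) cases mirror the uplink handling above: the more
	 * specific DW UDP/TCP rules are removed and the EH rules are moved out
	 * of hardware so they can be restored once the new rule is added.
	 */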
2740 case ICE_HASH_GTPU_CTX_DW_IP: 2741 ret = ice_hash_remove(pf, 2742 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]); 2743 if (ret && (ret != -ENOENT)) 2744 return ret; 2745 2746 ret = ice_hash_remove(pf, 2747 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]); 2748 if (ret && (ret != -ENOENT)) 2749 return ret; 2750 2751 ret = ice_hash_moveout(pf, 2752 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]); 2753 if (ret && (ret != -ENOENT)) 2754 return ret; 2755 2756 ret = ice_hash_moveout(pf, 2757 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); 2758 if (ret && (ret != -ENOENT)) 2759 return ret; 2760 2761 ret = ice_hash_moveout(pf, 2762 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); 2763 if (ret && (ret != -ENOENT)) 2764 return ret; 2765 2766 break; 2767 case ICE_HASH_GTPU_CTX_DW_IP_UDP: 2768 case ICE_HASH_GTPU_CTX_DW_IP_TCP: 2769 ret = ice_hash_moveout(pf, 2770 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]); 2771 if (ret && (ret != -ENOENT)) 2772 return ret; 2773 2774 ret = ice_hash_moveout(pf, 2775 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); 2776 if (ret && (ret != -ENOENT)) 2777 return ret; 2778 2779 ret = ice_hash_moveout(pf, 2780 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); 2781 if (ret && (ret != -ENOENT)) 2782 return ret; 2783 2784 break; 2785 default: 2786 break; 2787 } 2788 2789 return 0; 2790 } 2791 2792 static u8 calc_gtpu_ctx_idx(uint32_t hdr) 2793 { 2794 u8 eh_idx, ip_idx; 2795 2796 if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) 2797 eh_idx = 0; 2798 else if (hdr & ICE_FLOW_SEG_HDR_GTPU_UP) 2799 eh_idx = 1; 2800 else if (hdr & ICE_FLOW_SEG_HDR_GTPU_DWN) 2801 eh_idx = 2; 2802 else 2803 return ICE_HASH_GTPU_CTX_MAX; 2804 2805 ip_idx = 0; 2806 if (hdr & ICE_FLOW_SEG_HDR_UDP) 2807 ip_idx = 1; 2808 else if (hdr & ICE_FLOW_SEG_HDR_TCP) 2809 ip_idx = 2; 2810 2811 if (hdr & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)) 2812 return eh_idx * 3 + ip_idx; 2813 else 2814 return ICE_HASH_GTPU_CTX_MAX; 2815 } 2816 2817 static int 2818 ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr) 2819 { 2820 u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr); 2821 2822 if (hdr & ICE_FLOW_SEG_HDR_IPV4) 2823 return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu4, 2824 gtpu_ctx_idx); 2825 else if (hdr & ICE_FLOW_SEG_HDR_IPV6) 2826 return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu6, 2827 gtpu_ctx_idx); 2828 2829 return 0; 2830 } 2831 2832 static int 2833 ice_add_rss_cfg_post_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx, 2834 u8 ctx_idx, struct ice_rss_hash_cfg *cfg) 2835 { 2836 int ret; 2837 2838 if (ctx_idx < ICE_HASH_GTPU_CTX_MAX) 2839 ctx->ctx[ctx_idx] = *cfg; 2840 2841 switch (ctx_idx) { 2842 case ICE_HASH_GTPU_CTX_EH_IP: 2843 break; 2844 case ICE_HASH_GTPU_CTX_EH_IP_UDP: 2845 ret = ice_hash_moveback(pf, 2846 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]); 2847 if (ret && (ret != -ENOENT)) 2848 return ret; 2849 2850 ret = ice_hash_moveback(pf, 2851 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]); 2852 if (ret && (ret != -ENOENT)) 2853 return ret; 2854 2855 ret = ice_hash_moveback(pf, 2856 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]); 2857 if (ret && (ret != -ENOENT)) 2858 return ret; 2859 2860 ret = ice_hash_moveback(pf, 2861 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]); 2862 if (ret && (ret != -ENOENT)) 2863 return ret; 2864 2865 break; 2866 case ICE_HASH_GTPU_CTX_EH_IP_TCP: 2867 ret = ice_hash_moveback(pf, 2868 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]); 2869 if (ret && (ret != -ENOENT)) 2870 return ret; 2871 2872 ret = ice_hash_moveback(pf, 2873 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]); 2874 if (ret && (ret != -ENOENT)) 2875 return ret; 2876 2877 ret = ice_hash_moveback(pf, 2878 
&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]); 2879 if (ret && (ret != -ENOENT)) 2880 return ret; 2881 2882 ret = ice_hash_moveback(pf, 2883 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]); 2884 if (ret && (ret != -ENOENT)) 2885 return ret; 2886 2887 break; 2888 case ICE_HASH_GTPU_CTX_UP_IP: 2889 case ICE_HASH_GTPU_CTX_UP_IP_UDP: 2890 case ICE_HASH_GTPU_CTX_UP_IP_TCP: 2891 case ICE_HASH_GTPU_CTX_DW_IP: 2892 case ICE_HASH_GTPU_CTX_DW_IP_UDP: 2893 case ICE_HASH_GTPU_CTX_DW_IP_TCP: 2894 ret = ice_hash_moveback(pf, 2895 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]); 2896 if (ret && (ret != -ENOENT)) 2897 return ret; 2898 2899 ret = ice_hash_moveback(pf, 2900 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); 2901 if (ret && (ret != -ENOENT)) 2902 return ret; 2903 2904 ret = ice_hash_moveback(pf, 2905 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); 2906 if (ret && (ret != -ENOENT)) 2907 return ret; 2908 2909 break; 2910 default: 2911 break; 2912 } 2913 2914 return 0; 2915 } 2916 2917 static int 2918 ice_add_rss_cfg_post(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg) 2919 { 2920 u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(cfg->addl_hdrs); 2921 2922 if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) 2923 return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu4, 2924 gtpu_ctx_idx, cfg); 2925 else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) 2926 return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu6, 2927 gtpu_ctx_idx, cfg); 2928 2929 return 0; 2930 } 2931 2932 static void 2933 ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr) 2934 { 2935 u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr); 2936 2937 if (gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX) 2938 return; 2939 2940 if (hdr & ICE_FLOW_SEG_HDR_IPV4) 2941 hash_cfg_reset(&pf->hash_ctx.gtpu4.ctx[gtpu_ctx_idx]); 2942 else if (hdr & ICE_FLOW_SEG_HDR_IPV6) 2943 hash_cfg_reset(&pf->hash_ctx.gtpu6.ctx[gtpu_ctx_idx]); 2944 } 2945 2946 int 2947 ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, 2948 struct ice_rss_hash_cfg *cfg) 2949 { 2950 struct ice_hw *hw = ICE_PF_TO_HW(pf); 2951 int ret; 2952 2953 ret = ice_rem_rss_cfg(hw, vsi_id, cfg); 2954 if (ret && ret != ICE_ERR_DOES_NOT_EXIST) 2955 PMD_DRV_LOG(ERR, "remove rss cfg failed\n"); 2956 2957 ice_rem_rss_cfg_post(pf, cfg->addl_hdrs); 2958 2959 return 0; 2960 } 2961 2962 int 2963 ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, 2964 struct ice_rss_hash_cfg *cfg) 2965 { 2966 struct ice_hw *hw = ICE_PF_TO_HW(pf); 2967 int ret; 2968 2969 ret = ice_add_rss_cfg_pre(pf, cfg->addl_hdrs); 2970 if (ret) 2971 PMD_DRV_LOG(ERR, "add rss cfg pre failed\n"); 2972 2973 ret = ice_add_rss_cfg(hw, vsi_id, cfg); 2974 if (ret) 2975 PMD_DRV_LOG(ERR, "add rss cfg failed\n"); 2976 2977 ret = ice_add_rss_cfg_post(pf, cfg); 2978 if (ret) 2979 PMD_DRV_LOG(ERR, "add rss cfg post failed\n"); 2980 2981 return 0; 2982 } 2983 2984 static void 2985 ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf) 2986 { 2987 struct ice_hw *hw = ICE_PF_TO_HW(pf); 2988 struct ice_vsi *vsi = pf->main_vsi; 2989 struct ice_rss_hash_cfg cfg; 2990 int ret; 2991 2992 #define ICE_RSS_HF_ALL ( \ 2993 RTE_ETH_RSS_IPV4 | \ 2994 RTE_ETH_RSS_IPV6 | \ 2995 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \ 2996 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \ 2997 RTE_ETH_RSS_NONFRAG_IPV4_TCP | \ 2998 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \ 2999 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \ 3000 RTE_ETH_RSS_NONFRAG_IPV6_SCTP) 3001 3002 ret = ice_rem_vsi_rss_cfg(hw, vsi->idx); 3003 if (ret) 3004 PMD_DRV_LOG(ERR, "%s Remove rss vsi fail %d", 3005 __func__, ret); 3006 3007 cfg.symm = 0; 3008 cfg.hdr_type = ICE_RSS_OUTER_HEADERS; 3009 /* Configure RSS for IPv4 with 
src/dst addr as input set */ 3010 if (rss_hf & RTE_ETH_RSS_IPV4) { 3011 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER; 3012 cfg.hash_flds = ICE_FLOW_HASH_IPV4; 3013 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3014 if (ret) 3015 PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d", 3016 __func__, ret); 3017 } 3018 3019 /* Configure RSS for IPv6 with src/dst addr as input set */ 3020 if (rss_hf & RTE_ETH_RSS_IPV6) { 3021 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER; 3022 cfg.hash_flds = ICE_FLOW_HASH_IPV6; 3023 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3024 if (ret) 3025 PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d", 3026 __func__, ret); 3027 } 3028 3029 /* Configure RSS for udp4 with src/dst addr and port as input set */ 3030 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) { 3031 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 | 3032 ICE_FLOW_SEG_HDR_IPV_OTHER; 3033 cfg.hash_flds = ICE_HASH_UDP_IPV4; 3034 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3035 if (ret) 3036 PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d", 3037 __func__, ret); 3038 } 3039 3040 /* Configure RSS for udp6 with src/dst addr and port as input set */ 3041 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) { 3042 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 | 3043 ICE_FLOW_SEG_HDR_IPV_OTHER; 3044 cfg.hash_flds = ICE_HASH_UDP_IPV6; 3045 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3046 if (ret) 3047 PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d", 3048 __func__, ret); 3049 } 3050 3051 /* Configure RSS for tcp4 with src/dst addr and port as input set */ 3052 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) { 3053 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 | 3054 ICE_FLOW_SEG_HDR_IPV_OTHER; 3055 cfg.hash_flds = ICE_HASH_TCP_IPV4; 3056 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3057 if (ret) 3058 PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d", 3059 __func__, ret); 3060 } 3061 3062 /* Configure RSS for tcp6 with src/dst addr and port as input set */ 3063 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) { 3064 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 | 3065 ICE_FLOW_SEG_HDR_IPV_OTHER; 3066 cfg.hash_flds = ICE_HASH_TCP_IPV6; 3067 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3068 if (ret) 3069 PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d", 3070 __func__, ret); 3071 } 3072 3073 /* Configure RSS for sctp4 with src/dst addr and port as input set */ 3074 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) { 3075 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 | 3076 ICE_FLOW_SEG_HDR_IPV_OTHER; 3077 cfg.hash_flds = ICE_HASH_SCTP_IPV4; 3078 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3079 if (ret) 3080 PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d", 3081 __func__, ret); 3082 } 3083 3084 /* Configure RSS for sctp6 with src/dst addr and port as input set */ 3085 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) { 3086 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 | 3087 ICE_FLOW_SEG_HDR_IPV_OTHER; 3088 cfg.hash_flds = ICE_HASH_SCTP_IPV6; 3089 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3090 if (ret) 3091 PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d", 3092 __func__, ret); 3093 } 3094 3095 if (rss_hf & RTE_ETH_RSS_IPV4) { 3096 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 | 3097 ICE_FLOW_SEG_HDR_IPV_OTHER; 3098 cfg.hash_flds = ICE_FLOW_HASH_IPV4; 3099 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3100 if (ret) 3101 PMD_DRV_LOG(ERR, "%s PPPoE_IPV4 rss flow fail %d", 3102 __func__, ret); 3103 } 
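	/* The remaining PPPoE-encapsulated variants reuse the same hash
	 * fields, with the PPPoE header added to the matched segment.
	 */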
3104 3105 if (rss_hf & RTE_ETH_RSS_IPV6) { 3106 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 | 3107 ICE_FLOW_SEG_HDR_IPV_OTHER; 3108 cfg.hash_flds = ICE_FLOW_HASH_IPV6; 3109 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3110 if (ret) 3111 PMD_DRV_LOG(ERR, "%s PPPoE_IPV6 rss flow fail %d", 3112 __func__, ret); 3113 } 3114 3115 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) { 3116 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP | 3117 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER; 3118 cfg.hash_flds = ICE_HASH_UDP_IPV4; 3119 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3120 if (ret) 3121 PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow fail %d", 3122 __func__, ret); 3123 } 3124 3125 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) { 3126 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP | 3127 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER; 3128 cfg.hash_flds = ICE_HASH_UDP_IPV6; 3129 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3130 if (ret) 3131 PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d", 3132 __func__, ret); 3133 } 3134 3135 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) { 3136 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP | 3137 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER; 3138 cfg.hash_flds = ICE_HASH_TCP_IPV4; 3139 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3140 if (ret) 3141 PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d", 3142 __func__, ret); 3143 } 3144 3145 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) { 3146 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP | 3147 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER; 3148 cfg.hash_flds = ICE_HASH_TCP_IPV6; 3149 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg); 3150 if (ret) 3151 PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d", 3152 __func__, ret); 3153 } 3154 3155 pf->rss_hf = rss_hf & ICE_RSS_HF_ALL; 3156 } 3157 3158 static void 3159 ice_get_default_rss_key(uint8_t *rss_key, uint32_t rss_key_size) 3160 { 3161 static struct ice_aqc_get_set_rss_keys default_key; 3162 static bool default_key_done; 3163 uint8_t *key = (uint8_t *)&default_key; 3164 size_t i; 3165 3166 if (rss_key_size > sizeof(default_key)) { 3167 PMD_DRV_LOG(WARNING, 3168 "requested size %u is larger than default %zu, " 3169 "only %zu bytes are gotten for key\n", 3170 rss_key_size, sizeof(default_key), 3171 sizeof(default_key)); 3172 } 3173 3174 if (!default_key_done) { 3175 /* Calculate the default hash key */ 3176 for (i = 0; i < sizeof(default_key); i++) 3177 key[i] = (uint8_t)rte_rand(); 3178 default_key_done = true; 3179 } 3180 rte_memcpy(rss_key, key, RTE_MIN(rss_key_size, sizeof(default_key))); 3181 } 3182 3183 static int ice_init_rss(struct ice_pf *pf) 3184 { 3185 struct ice_hw *hw = ICE_PF_TO_HW(pf); 3186 struct ice_vsi *vsi = pf->main_vsi; 3187 struct rte_eth_dev_data *dev_data = pf->dev_data; 3188 struct ice_aq_get_set_rss_lut_params lut_params; 3189 struct rte_eth_rss_conf *rss_conf; 3190 struct ice_aqc_get_set_rss_keys key; 3191 uint16_t i, nb_q; 3192 int ret = 0; 3193 bool is_safe_mode = pf->adapter->is_safe_mode; 3194 uint32_t reg; 3195 3196 rss_conf = &dev_data->dev_conf.rx_adv_conf.rss_conf; 3197 nb_q = dev_data->nb_rx_queues; 3198 vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE; 3199 vsi->rss_lut_size = pf->hash_lut_size; 3200 3201 if (nb_q == 0) { 3202 PMD_DRV_LOG(WARNING, 3203 "RSS is not supported as rx queues number is zero\n"); 3204 return 0; 3205 } 3206 3207 if (is_safe_mode) { 3208 PMD_DRV_LOG(WARNING, "RSS is not supported in safe 
mode\n");
3209 		return 0;
3210 	}
3211 
3212 	if (!vsi->rss_key) {
3213 		vsi->rss_key = rte_zmalloc(NULL,
3214 					   vsi->rss_key_size, 0);
3215 		if (vsi->rss_key == NULL) {
3216 			PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
3217 			return -ENOMEM;
3218 		}
3219 	}
3220 	if (!vsi->rss_lut) {
3221 		vsi->rss_lut = rte_zmalloc(NULL,
3222 					   vsi->rss_lut_size, 0);
3223 		if (vsi->rss_lut == NULL) {
3224 			PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_lut");
3225 			rte_free(vsi->rss_key);
3226 			vsi->rss_key = NULL;
3227 			return -ENOMEM;
3228 		}
3229 	}
3230 	/* configure RSS key */
3231 	if (!rss_conf->rss_key)
3232 		ice_get_default_rss_key(vsi->rss_key, vsi->rss_key_size);
3233 	else
3234 		rte_memcpy(vsi->rss_key, rss_conf->rss_key,
3235 			   RTE_MIN(rss_conf->rss_key_len,
3236 				   vsi->rss_key_size));
3237 
3238 	rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
3239 	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
3240 	if (ret)
3241 		goto out;
3242 
3243 	/* init RSS LUT table */
3244 	for (i = 0; i < vsi->rss_lut_size; i++)
3245 		vsi->rss_lut[i] = i % nb_q;
3246 
3247 	lut_params.vsi_handle = vsi->idx;
3248 	lut_params.lut_size = vsi->rss_lut_size;
3249 	lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
3250 	lut_params.lut = vsi->rss_lut;
3251 	lut_params.global_lut_id = 0;
3252 	ret = ice_aq_set_rss_lut(hw, &lut_params);
3253 	if (ret)
3254 		goto out;
3255 
3256 	/* Enable registers for symmetric_toeplitz function. */
3257 	reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
3258 	reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
3259 	      (1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
3260 	ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
3261 
3262 	/* RSS hash configuration */
3263 	ice_rss_hash_set(pf, rss_conf->rss_hf);
3264 
3265 	return 0;
3266 out:
3267 	rte_free(vsi->rss_key);
3268 	vsi->rss_key = NULL;
3269 	rte_free(vsi->rss_lut);
3270 	vsi->rss_lut = NULL;
3271 	return -EINVAL;
3272 }
3273 
3274 static int
3275 ice_dev_configure(struct rte_eth_dev *dev)
3276 {
3277 	struct ice_adapter *ad =
3278 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3279 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3280 	int ret;
3281 
3282 	/* Initialize to TRUE. If any of the Rx queues doesn't meet the
3283 	 * bulk allocation or vector Rx preconditions, we will reset it.
3284 */ 3285 ad->rx_bulk_alloc_allowed = true; 3286 ad->tx_simple_allowed = true; 3287 3288 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 3289 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 3290 3291 if (dev->data->nb_rx_queues) { 3292 ret = ice_init_rss(pf); 3293 if (ret) { 3294 PMD_DRV_LOG(ERR, "Failed to enable rss for PF"); 3295 return ret; 3296 } 3297 } 3298 3299 return 0; 3300 } 3301 3302 static void 3303 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect, 3304 int base_queue, int nb_queue) 3305 { 3306 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 3307 uint32_t val, val_tx; 3308 int rx_low_latency, i; 3309 3310 rx_low_latency = vsi->adapter->devargs.rx_low_latency; 3311 for (i = 0; i < nb_queue; i++) { 3312 /*do actual bind*/ 3313 val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) | 3314 (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M; 3315 val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) | 3316 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M; 3317 3318 PMD_DRV_LOG(INFO, "queue %d is binding to vect %d", 3319 base_queue + i, msix_vect); 3320 3321 /* set ITR0 value */ 3322 if (rx_low_latency) { 3323 /** 3324 * Empirical configuration for optimal real time 3325 * latency reduced interrupt throttling to 2us 3326 */ 3327 ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x1); 3328 ICE_WRITE_REG(hw, QRX_ITR(base_queue + i), 3329 QRX_ITR_NO_EXPR_M); 3330 } else { 3331 ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x2); 3332 ICE_WRITE_REG(hw, QRX_ITR(base_queue + i), 0); 3333 } 3334 3335 ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val); 3336 ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx); 3337 } 3338 } 3339 3340 void 3341 ice_vsi_queues_bind_intr(struct ice_vsi *vsi) 3342 { 3343 struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id]; 3344 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); 3345 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 3346 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 3347 uint16_t msix_vect = vsi->msix_intr; 3348 uint16_t nb_msix = RTE_MIN(vsi->nb_msix, 3349 rte_intr_nb_efd_get(intr_handle)); 3350 uint16_t queue_idx = 0; 3351 int record = 0; 3352 int i; 3353 3354 /* clear Rx/Tx queue interrupt */ 3355 for (i = 0; i < vsi->nb_used_qps; i++) { 3356 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0); 3357 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0); 3358 } 3359 3360 /* PF bind interrupt */ 3361 if (rte_intr_dp_is_en(intr_handle)) { 3362 queue_idx = 0; 3363 record = 1; 3364 } 3365 3366 for (i = 0; i < vsi->nb_used_qps; i++) { 3367 if (nb_msix <= 1) { 3368 if (!rte_intr_allow_others(intr_handle)) 3369 msix_vect = ICE_MISC_VEC_ID; 3370 3371 /* uio mapping all queue to one msix_vect */ 3372 __vsi_queues_bind_intr(vsi, msix_vect, 3373 vsi->base_queue + i, 3374 vsi->nb_used_qps - i); 3375 3376 for (; !!record && i < vsi->nb_used_qps; i++) 3377 rte_intr_vec_list_index_set(intr_handle, 3378 queue_idx + i, msix_vect); 3379 3380 break; 3381 } 3382 3383 /* vfio 1:1 queue/msix_vect mapping */ 3384 __vsi_queues_bind_intr(vsi, msix_vect, 3385 vsi->base_queue + i, 1); 3386 3387 if (!!record) 3388 rte_intr_vec_list_index_set(intr_handle, 3389 queue_idx + i, 3390 msix_vect); 3391 3392 msix_vect++; 3393 nb_msix--; 3394 } 3395 } 3396 3397 void 3398 ice_vsi_enable_queues_intr(struct ice_vsi *vsi) 3399 { 3400 struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id]; 3401 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); 3402 struct rte_intr_handle *intr_handle = 
pci_dev->intr_handle; 3403 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 3404 uint16_t msix_intr, i; 3405 3406 if (rte_intr_allow_others(intr_handle)) 3407 for (i = 0; i < vsi->nb_used_qps; i++) { 3408 msix_intr = vsi->msix_intr + i; 3409 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), 3410 GLINT_DYN_CTL_INTENA_M | 3411 GLINT_DYN_CTL_CLEARPBA_M | 3412 GLINT_DYN_CTL_ITR_INDX_M | 3413 GLINT_DYN_CTL_WB_ON_ITR_M); 3414 } 3415 else 3416 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), 3417 GLINT_DYN_CTL_INTENA_M | 3418 GLINT_DYN_CTL_CLEARPBA_M | 3419 GLINT_DYN_CTL_ITR_INDX_M | 3420 GLINT_DYN_CTL_WB_ON_ITR_M); 3421 } 3422 3423 static int 3424 ice_rxq_intr_setup(struct rte_eth_dev *dev) 3425 { 3426 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 3427 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); 3428 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 3429 struct ice_vsi *vsi = pf->main_vsi; 3430 uint32_t intr_vector = 0; 3431 3432 rte_intr_disable(intr_handle); 3433 3434 /* check and configure queue intr-vector mapping */ 3435 if ((rte_intr_cap_multiple(intr_handle) || 3436 !RTE_ETH_DEV_SRIOV(dev).active) && 3437 dev->data->dev_conf.intr_conf.rxq != 0) { 3438 intr_vector = dev->data->nb_rx_queues; 3439 if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) { 3440 PMD_DRV_LOG(ERR, "At most %d intr queues supported", 3441 ICE_MAX_INTR_QUEUE_NUM); 3442 return -ENOTSUP; 3443 } 3444 if (rte_intr_efd_enable(intr_handle, intr_vector)) 3445 return -1; 3446 } 3447 3448 if (rte_intr_dp_is_en(intr_handle)) { 3449 if (rte_intr_vec_list_alloc(intr_handle, NULL, 3450 dev->data->nb_rx_queues)) { 3451 PMD_DRV_LOG(ERR, 3452 "Failed to allocate %d rx_queues intr_vec", 3453 dev->data->nb_rx_queues); 3454 return -ENOMEM; 3455 } 3456 } 3457 3458 /* Map queues with MSIX interrupt */ 3459 vsi->nb_used_qps = dev->data->nb_rx_queues; 3460 ice_vsi_queues_bind_intr(vsi); 3461 3462 /* Enable interrupts for all the queues */ 3463 ice_vsi_enable_queues_intr(vsi); 3464 3465 rte_intr_enable(intr_handle); 3466 3467 return 0; 3468 } 3469 3470 static void 3471 ice_get_init_link_status(struct rte_eth_dev *dev) 3472 { 3473 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3474 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 3475 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? 
true : false; 3476 struct ice_link_status link_status; 3477 int ret; 3478 3479 ret = ice_aq_get_link_info(hw->port_info, enable_lse, 3480 &link_status, NULL); 3481 if (ret != ICE_SUCCESS) { 3482 PMD_DRV_LOG(ERR, "Failed to get link info"); 3483 pf->init_link_up = false; 3484 return; 3485 } 3486 3487 if (link_status.link_info & ICE_AQ_LINK_UP) 3488 pf->init_link_up = true; 3489 } 3490 3491 static int 3492 ice_pps_out_cfg(struct ice_hw *hw, int idx, int timer) 3493 { 3494 uint64_t current_time, start_time; 3495 uint32_t hi, lo, lo2, func, val; 3496 3497 lo = ICE_READ_REG(hw, GLTSYN_TIME_L(timer)); 3498 hi = ICE_READ_REG(hw, GLTSYN_TIME_H(timer)); 3499 lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(timer)); 3500 3501 if (lo2 < lo) { 3502 lo = ICE_READ_REG(hw, GLTSYN_TIME_L(timer)); 3503 hi = ICE_READ_REG(hw, GLTSYN_TIME_H(timer)); 3504 } 3505 3506 current_time = ((uint64_t)hi << 32) | lo; 3507 3508 start_time = (current_time + NSEC_PER_SEC) / 3509 NSEC_PER_SEC * NSEC_PER_SEC; 3510 start_time = start_time - PPS_OUT_DELAY_NS; 3511 3512 func = 8 + idx + timer * 4; 3513 val = GLGEN_GPIO_CTL_PIN_DIR_M | 3514 ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & 3515 GLGEN_GPIO_CTL_PIN_FUNC_M); 3516 3517 /* Write clkout with half of period value */ 3518 ICE_WRITE_REG(hw, GLTSYN_CLKO(idx, timer), NSEC_PER_SEC / 2); 3519 3520 /* Write TARGET time register */ 3521 ICE_WRITE_REG(hw, GLTSYN_TGT_L(idx, timer), start_time & 0xffffffff); 3522 ICE_WRITE_REG(hw, GLTSYN_TGT_H(idx, timer), start_time >> 32); 3523 3524 /* Write AUX_OUT register */ 3525 ICE_WRITE_REG(hw, GLTSYN_AUX_OUT(idx, timer), 3526 GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M); 3527 3528 /* Write GPIO CTL register */ 3529 ICE_WRITE_REG(hw, GLGEN_GPIO_CTL(idx), val); 3530 3531 return 0; 3532 } 3533 3534 static int 3535 ice_dev_start(struct rte_eth_dev *dev) 3536 { 3537 struct rte_eth_dev_data *data = dev->data; 3538 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3539 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 3540 struct ice_vsi *vsi = pf->main_vsi; 3541 struct ice_adapter *ad = 3542 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 3543 uint16_t nb_rxq = 0; 3544 uint16_t nb_txq, i; 3545 uint16_t max_frame_size; 3546 int mask, ret; 3547 uint8_t timer = hw->func_caps.ts_func_info.tmr_index_owned; 3548 uint32_t pin_idx = ad->devargs.pin_idx; 3549 3550 /* program Tx queues' context in hardware */ 3551 for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) { 3552 ret = ice_tx_queue_start(dev, nb_txq); 3553 if (ret) { 3554 PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq); 3555 goto tx_err; 3556 } 3557 } 3558 3559 /* program Rx queues' context in hardware*/ 3560 for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) { 3561 ret = ice_rx_queue_start(dev, nb_rxq); 3562 if (ret) { 3563 PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq); 3564 goto rx_err; 3565 } 3566 } 3567 3568 ice_set_rx_function(dev); 3569 ice_set_tx_function(dev); 3570 3571 mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK | 3572 RTE_ETH_VLAN_EXTEND_MASK; 3573 ret = ice_vlan_offload_set(dev, mask); 3574 if (ret) { 3575 PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); 3576 goto rx_err; 3577 } 3578 3579 /* enable Rx interrput and mapping Rx queue to interrupt vector */ 3580 if (ice_rxq_intr_setup(dev)) 3581 return -EIO; 3582 3583 /* Enable receiving broadcast packets and transmitting packets */ 3584 ret = ice_set_vsi_promisc(hw, vsi->idx, 3585 ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX | 3586 ICE_PROMISC_UCAST_TX | 
ICE_PROMISC_MCAST_TX, 3587 0); 3588 if (ret != ICE_SUCCESS) 3589 PMD_DRV_LOG(INFO, "fail to set vsi broadcast"); 3590 3591 ret = ice_aq_set_event_mask(hw, hw->port_info->lport, 3592 ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT | 3593 ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM | 3594 ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS | 3595 ICE_AQ_LINK_EVENT_SIGNAL_DETECT | 3596 ICE_AQ_LINK_EVENT_AN_COMPLETED | 3597 ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)), 3598 NULL); 3599 if (ret != ICE_SUCCESS) 3600 PMD_DRV_LOG(WARNING, "Fail to set phy mask"); 3601 3602 ice_get_init_link_status(dev); 3603 3604 ice_dev_set_link_up(dev); 3605 3606 /* Call get_link_info aq commond to enable/disable LSE */ 3607 ice_link_update(dev, 0); 3608 3609 pf->adapter_stopped = false; 3610 3611 /* Set the max frame size to default value*/ 3612 max_frame_size = pf->dev_data->mtu ? 3613 pf->dev_data->mtu + ICE_ETH_OVERHEAD : 3614 ICE_FRAME_SIZE_MAX; 3615 3616 /* Set the max frame size to HW*/ 3617 ice_aq_set_mac_cfg(hw, max_frame_size, NULL); 3618 3619 if (ad->devargs.pps_out_ena) { 3620 ret = ice_pps_out_cfg(hw, pin_idx, timer); 3621 if (ret) { 3622 PMD_DRV_LOG(ERR, "Fail to configure 1pps out"); 3623 goto rx_err; 3624 } 3625 } 3626 3627 return 0; 3628 3629 /* stop the started queues if failed to start all queues */ 3630 rx_err: 3631 for (i = 0; i < nb_rxq; i++) 3632 ice_rx_queue_stop(dev, i); 3633 tx_err: 3634 for (i = 0; i < nb_txq; i++) 3635 ice_tx_queue_stop(dev, i); 3636 3637 return -EIO; 3638 } 3639 3640 static int 3641 ice_dev_reset(struct rte_eth_dev *dev) 3642 { 3643 int ret; 3644 3645 if (dev->data->sriov.active) 3646 return -ENOTSUP; 3647 3648 ret = ice_dev_uninit(dev); 3649 if (ret) { 3650 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret); 3651 return -ENXIO; 3652 } 3653 3654 ret = ice_dev_init(dev); 3655 if (ret) { 3656 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret); 3657 return -ENXIO; 3658 } 3659 3660 return 0; 3661 } 3662 3663 static int 3664 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 3665 { 3666 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 3667 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3668 struct ice_vsi *vsi = pf->main_vsi; 3669 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device); 3670 bool is_safe_mode = pf->adapter->is_safe_mode; 3671 u64 phy_type_low; 3672 u64 phy_type_high; 3673 3674 dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN; 3675 dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX; 3676 dev_info->max_rx_queues = vsi->nb_qps; 3677 dev_info->max_tx_queues = vsi->nb_qps; 3678 dev_info->max_mac_addrs = vsi->max_macaddrs; 3679 dev_info->max_vfs = pci_dev->max_vfs; 3680 dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD; 3681 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 3682 3683 dev_info->rx_offload_capa = 3684 RTE_ETH_RX_OFFLOAD_VLAN_STRIP | 3685 RTE_ETH_RX_OFFLOAD_KEEP_CRC | 3686 RTE_ETH_RX_OFFLOAD_SCATTER | 3687 RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 3688 dev_info->tx_offload_capa = 3689 RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 3690 RTE_ETH_TX_OFFLOAD_TCP_TSO | 3691 RTE_ETH_TX_OFFLOAD_MULTI_SEGS | 3692 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 3693 dev_info->flow_type_rss_offloads = 0; 3694 3695 if (!is_safe_mode) { 3696 dev_info->rx_offload_capa |= 3697 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | 3698 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | 3699 RTE_ETH_RX_OFFLOAD_TCP_CKSUM | 3700 RTE_ETH_RX_OFFLOAD_QINQ_STRIP | 3701 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | 3702 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | 3703 RTE_ETH_RX_OFFLOAD_RSS_HASH | 3704 RTE_ETH_RX_OFFLOAD_TIMESTAMP; 
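		/* Outside safe mode the Tx path additionally advertises
		 * checksum, QinQ insertion and tunnel offloads.
		 */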
3705 dev_info->tx_offload_capa |= 3706 RTE_ETH_TX_OFFLOAD_QINQ_INSERT | 3707 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | 3708 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | 3709 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | 3710 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | 3711 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | 3712 RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; 3713 dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL; 3714 } 3715 3716 dev_info->rx_queue_offload_capa = 0; 3717 dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 3718 3719 dev_info->reta_size = pf->hash_lut_size; 3720 dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); 3721 3722 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3723 .rx_thresh = { 3724 .pthresh = ICE_DEFAULT_RX_PTHRESH, 3725 .hthresh = ICE_DEFAULT_RX_HTHRESH, 3726 .wthresh = ICE_DEFAULT_RX_WTHRESH, 3727 }, 3728 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH, 3729 .rx_drop_en = 0, 3730 .offloads = 0, 3731 }; 3732 3733 dev_info->default_txconf = (struct rte_eth_txconf) { 3734 .tx_thresh = { 3735 .pthresh = ICE_DEFAULT_TX_PTHRESH, 3736 .hthresh = ICE_DEFAULT_TX_HTHRESH, 3737 .wthresh = ICE_DEFAULT_TX_WTHRESH, 3738 }, 3739 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH, 3740 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH, 3741 .offloads = 0, 3742 }; 3743 3744 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { 3745 .nb_max = ICE_MAX_RING_DESC, 3746 .nb_min = ICE_MIN_RING_DESC, 3747 .nb_align = ICE_ALIGN_RING_DESC, 3748 }; 3749 3750 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { 3751 .nb_max = ICE_MAX_RING_DESC, 3752 .nb_min = ICE_MIN_RING_DESC, 3753 .nb_align = ICE_ALIGN_RING_DESC, 3754 }; 3755 3756 dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M | 3757 RTE_ETH_LINK_SPEED_100M | 3758 RTE_ETH_LINK_SPEED_1G | 3759 RTE_ETH_LINK_SPEED_2_5G | 3760 RTE_ETH_LINK_SPEED_5G | 3761 RTE_ETH_LINK_SPEED_10G | 3762 RTE_ETH_LINK_SPEED_20G | 3763 RTE_ETH_LINK_SPEED_25G; 3764 3765 phy_type_low = hw->port_info->phy.phy_type_low; 3766 phy_type_high = hw->port_info->phy.phy_type_high; 3767 3768 if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low)) 3769 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G; 3770 3771 if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) || 3772 ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high)) 3773 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G; 3774 3775 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3776 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3777 3778 dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST; 3779 dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST; 3780 dev_info->default_rxportconf.nb_queues = 1; 3781 dev_info->default_txportconf.nb_queues = 1; 3782 dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN; 3783 dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN; 3784 3785 return 0; 3786 } 3787 3788 static inline int 3789 ice_atomic_read_link_status(struct rte_eth_dev *dev, 3790 struct rte_eth_link *link) 3791 { 3792 struct rte_eth_link *dst = link; 3793 struct rte_eth_link *src = &dev->data->dev_link; 3794 3795 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 3796 *(uint64_t *)src) == 0) 3797 return -1; 3798 3799 return 0; 3800 } 3801 3802 static inline int 3803 ice_atomic_write_link_status(struct rte_eth_dev *dev, 3804 struct rte_eth_link *link) 3805 { 3806 struct rte_eth_link *dst = &dev->data->dev_link; 3807 struct rte_eth_link *src = link; 3808 3809 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 3810 *(uint64_t *)src) == 0) 3811 return -1; 3812 3813 return 0; 3814 } 3815 3816 static int 3817 ice_link_update(struct 
rte_eth_dev *dev, int wait_to_complete) 3818 { 3819 #define CHECK_INTERVAL 100 /* 100ms */ 3820 #define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ 3821 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3822 struct ice_link_status link_status; 3823 struct rte_eth_link link, old; 3824 int status; 3825 unsigned int rep_cnt = MAX_REPEAT_TIME; 3826 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false; 3827 3828 memset(&link, 0, sizeof(link)); 3829 memset(&old, 0, sizeof(old)); 3830 memset(&link_status, 0, sizeof(link_status)); 3831 ice_atomic_read_link_status(dev, &old); 3832 3833 do { 3834 /* Get link status information from hardware */ 3835 status = ice_aq_get_link_info(hw->port_info, enable_lse, 3836 &link_status, NULL); 3837 if (status != ICE_SUCCESS) { 3838 link.link_speed = RTE_ETH_SPEED_NUM_100M; 3839 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 3840 PMD_DRV_LOG(ERR, "Failed to get link info"); 3841 goto out; 3842 } 3843 3844 link.link_status = link_status.link_info & ICE_AQ_LINK_UP; 3845 if (!wait_to_complete || link.link_status) 3846 break; 3847 3848 rte_delay_ms(CHECK_INTERVAL); 3849 } while (--rep_cnt); 3850 3851 if (!link.link_status) 3852 goto out; 3853 3854 /* Full-duplex operation at all supported speeds */ 3855 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 3856 3857 /* Parse the link status */ 3858 switch (link_status.link_speed) { 3859 case ICE_AQ_LINK_SPEED_10MB: 3860 link.link_speed = RTE_ETH_SPEED_NUM_10M; 3861 break; 3862 case ICE_AQ_LINK_SPEED_100MB: 3863 link.link_speed = RTE_ETH_SPEED_NUM_100M; 3864 break; 3865 case ICE_AQ_LINK_SPEED_1000MB: 3866 link.link_speed = RTE_ETH_SPEED_NUM_1G; 3867 break; 3868 case ICE_AQ_LINK_SPEED_2500MB: 3869 link.link_speed = RTE_ETH_SPEED_NUM_2_5G; 3870 break; 3871 case ICE_AQ_LINK_SPEED_5GB: 3872 link.link_speed = RTE_ETH_SPEED_NUM_5G; 3873 break; 3874 case ICE_AQ_LINK_SPEED_10GB: 3875 link.link_speed = RTE_ETH_SPEED_NUM_10G; 3876 break; 3877 case ICE_AQ_LINK_SPEED_20GB: 3878 link.link_speed = RTE_ETH_SPEED_NUM_20G; 3879 break; 3880 case ICE_AQ_LINK_SPEED_25GB: 3881 link.link_speed = RTE_ETH_SPEED_NUM_25G; 3882 break; 3883 case ICE_AQ_LINK_SPEED_40GB: 3884 link.link_speed = RTE_ETH_SPEED_NUM_40G; 3885 break; 3886 case ICE_AQ_LINK_SPEED_50GB: 3887 link.link_speed = RTE_ETH_SPEED_NUM_50G; 3888 break; 3889 case ICE_AQ_LINK_SPEED_100GB: 3890 link.link_speed = RTE_ETH_SPEED_NUM_100G; 3891 break; 3892 case ICE_AQ_LINK_SPEED_UNKNOWN: 3893 PMD_DRV_LOG(ERR, "Unknown link speed"); 3894 link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN; 3895 break; 3896 default: 3897 PMD_DRV_LOG(ERR, "None link speed"); 3898 link.link_speed = RTE_ETH_SPEED_NUM_NONE; 3899 break; 3900 } 3901 3902 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 3903 RTE_ETH_LINK_SPEED_FIXED); 3904 3905 out: 3906 ice_atomic_write_link_status(dev, &link); 3907 if (link.link_status == old.link_status) 3908 return -1; 3909 3910 return 0; 3911 } 3912 3913 /* Force the physical link state by getting the current PHY capabilities from 3914 * hardware and setting the PHY config based on the determined capabilities. If 3915 * link changes, link event will be triggered because both the Enable Automatic 3916 * Link Update and LESM Enable bits are set when setting the PHY capabilities. 
3917 */ 3918 static enum ice_status 3919 ice_force_phys_link_state(struct ice_hw *hw, bool link_up) 3920 { 3921 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 3922 struct ice_aqc_get_phy_caps_data *pcaps; 3923 struct ice_port_info *pi; 3924 enum ice_status status; 3925 3926 if (!hw || !hw->port_info) 3927 return ICE_ERR_PARAM; 3928 3929 pi = hw->port_info; 3930 3931 pcaps = (struct ice_aqc_get_phy_caps_data *) 3932 ice_malloc(hw, sizeof(*pcaps)); 3933 if (!pcaps) 3934 return ICE_ERR_NO_MEMORY; 3935 3936 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, 3937 pcaps, NULL); 3938 if (status) 3939 goto out; 3940 3941 /* No change in link */ 3942 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && 3943 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) 3944 goto out; 3945 3946 cfg.phy_type_low = pcaps->phy_type_low; 3947 cfg.phy_type_high = pcaps->phy_type_high; 3948 cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3949 cfg.low_power_ctrl_an = pcaps->low_power_ctrl_an; 3950 cfg.eee_cap = pcaps->eee_cap; 3951 cfg.eeer_value = pcaps->eeer_value; 3952 cfg.link_fec_opt = pcaps->link_fec_options; 3953 if (link_up) 3954 cfg.caps |= ICE_AQ_PHY_ENA_LINK; 3955 else 3956 cfg.caps &= ~ICE_AQ_PHY_ENA_LINK; 3957 3958 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 3959 3960 out: 3961 ice_free(hw, pcaps); 3962 return status; 3963 } 3964 3965 static int 3966 ice_dev_set_link_up(struct rte_eth_dev *dev) 3967 { 3968 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3969 3970 return ice_force_phys_link_state(hw, true); 3971 } 3972 3973 static int 3974 ice_dev_set_link_down(struct rte_eth_dev *dev) 3975 { 3976 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3977 3978 return ice_force_phys_link_state(hw, false); 3979 } 3980 3981 static int 3982 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused) 3983 { 3984 /* mtu setting is forbidden if port is start */ 3985 if (dev->data->dev_started != 0) { 3986 PMD_DRV_LOG(ERR, 3987 "port %d must be stopped before configuration", 3988 dev->data->port_id); 3989 return -EBUSY; 3990 } 3991 3992 return 0; 3993 } 3994 3995 static int ice_macaddr_set(struct rte_eth_dev *dev, 3996 struct rte_ether_addr *mac_addr) 3997 { 3998 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3999 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4000 struct ice_vsi *vsi = pf->main_vsi; 4001 struct ice_mac_filter *f; 4002 uint8_t flags = 0; 4003 int ret; 4004 4005 if (!rte_is_valid_assigned_ether_addr(mac_addr)) { 4006 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address."); 4007 return -EINVAL; 4008 } 4009 4010 TAILQ_FOREACH(f, &vsi->mac_list, next) { 4011 if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr)) 4012 break; 4013 } 4014 4015 if (!f) { 4016 PMD_DRV_LOG(ERR, "Failed to find filter for default mac"); 4017 return -EIO; 4018 } 4019 4020 ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr); 4021 if (ret != ICE_SUCCESS) { 4022 PMD_DRV_LOG(ERR, "Failed to delete mac filter"); 4023 return -EIO; 4024 } 4025 ret = ice_add_mac_filter(vsi, mac_addr); 4026 if (ret != ICE_SUCCESS) { 4027 PMD_DRV_LOG(ERR, "Failed to add mac filter"); 4028 return -EIO; 4029 } 4030 rte_ether_addr_copy(mac_addr, &pf->dev_addr); 4031 4032 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; 4033 ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL); 4034 if (ret != ICE_SUCCESS) 4035 PMD_DRV_LOG(ERR, "Failed to set manage mac"); 4036 4037 return 0; 4038 } 4039 4040 /* Add a MAC address, and 
update filters */
4041 static int
4042 ice_macaddr_add(struct rte_eth_dev *dev,
4043 struct rte_ether_addr *mac_addr,
4044 __rte_unused uint32_t index,
4045 __rte_unused uint32_t pool)
4046 {
4047 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4048 struct ice_vsi *vsi = pf->main_vsi;
4049 int ret;
4050
4051 ret = ice_add_mac_filter(vsi, mac_addr);
4052 if (ret != ICE_SUCCESS) {
4053 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
4054 return -EINVAL;
4055 }
4056
4057 return ICE_SUCCESS;
4058 }
4059
4060 /* Remove a MAC address, and update filters */
4061 static void
4062 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
4063 {
4064 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4065 struct ice_vsi *vsi = pf->main_vsi;
4066 struct rte_eth_dev_data *data = dev->data;
4067 struct rte_ether_addr *macaddr;
4068 int ret;
4069
4070 macaddr = &data->mac_addrs[index];
4071 ret = ice_remove_mac_filter(vsi, macaddr);
4072 if (ret) {
4073 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
4074 return;
4075 }
4076 }
4077
4078 static int
4079 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
4080 {
4081 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4082 struct ice_vlan vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, vlan_id);
4083 struct ice_vsi *vsi = pf->main_vsi;
4084 int ret;
4085
4086 PMD_INIT_FUNC_TRACE();
4087
4088 /**
4089 * VLAN 0 is the generic filter for untagged packets
4090 * and can't be removed or added by the user.
4091 */
4092 if (vlan_id == 0)
4093 return 0;
4094
4095 if (on) {
4096 ret = ice_add_vlan_filter(vsi, &vlan);
4097 if (ret < 0) {
4098 PMD_DRV_LOG(ERR, "Failed to add vlan filter");
4099 return -EINVAL;
4100 }
4101 } else {
4102 ret = ice_remove_vlan_filter(vsi, &vlan);
4103 if (ret < 0) {
4104 PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
4105 return -EINVAL;
4106 }
4107 }
4108
4109 return 0;
4110 }
4111
4112 /* In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are
4113 * based on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8)
4114 * doesn't matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
4115 * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
4116 *
4117 * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
4118 * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
4119 * traffic in SVM, since the VLAN TPID isn't part of filtering.
4120 *
4121 * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
4122 * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
4123 * part of filtering.
4124 */
4125 static int
4126 ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
4127 {
4128 struct ice_vlan vlan;
4129 int err;
4130
4131 vlan = ICE_VLAN(0, 0);
4132 err = ice_add_vlan_filter(vsi, &vlan);
4133 if (err) {
4134 PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0");
4135 return err;
4136 }
4137
4138 /* in SVM both VLAN 0 filters are identical */
4139 if (!ice_is_dvm_ena(&vsi->adapter->hw))
4140 return 0;
4141
4142 vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
4143 err = ice_add_vlan_filter(vsi, &vlan);
4144 if (err) {
4145 PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0 in double VLAN mode");
4146 return err;
4147 }
4148
4149 return 0;
4150 }
4151
4152 /*
4153 * Delete the VLAN 0 filters in the same manner that they were added in
4154 * ice_vsi_add_vlan_zero.
4155 */ 4156 static int 4157 ice_vsi_del_vlan_zero(struct ice_vsi *vsi) 4158 { 4159 struct ice_vlan vlan; 4160 int err; 4161 4162 vlan = ICE_VLAN(0, 0); 4163 err = ice_remove_vlan_filter(vsi, &vlan); 4164 if (err) { 4165 PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0"); 4166 return err; 4167 } 4168 4169 /* in SVM both VLAN 0 filters are identical */ 4170 if (!ice_is_dvm_ena(&vsi->adapter->hw)) 4171 return 0; 4172 4173 vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0); 4174 err = ice_remove_vlan_filter(vsi, &vlan); 4175 if (err) { 4176 PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0 in double VLAN mode"); 4177 return err; 4178 } 4179 4180 return 0; 4181 } 4182 4183 /* Configure vlan filter on or off */ 4184 static int 4185 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on) 4186 { 4187 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4188 struct ice_vsi_ctx ctxt; 4189 uint8_t sw_flags2; 4190 int ret = 0; 4191 4192 sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 4193 4194 if (on) 4195 vsi->info.sw_flags2 |= sw_flags2; 4196 else 4197 vsi->info.sw_flags2 &= ~sw_flags2; 4198 4199 vsi->info.sw_id = hw->port_info->sw_id; 4200 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 4201 ctxt.info.valid_sections = 4202 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID | 4203 ICE_AQ_VSI_PROP_SECURITY_VALID); 4204 ctxt.vsi_num = vsi->vsi_id; 4205 4206 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); 4207 if (ret) { 4208 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning", 4209 on ? "enable" : "disable"); 4210 return -EINVAL; 4211 } else { 4212 vsi->info.valid_sections |= 4213 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID | 4214 ICE_AQ_VSI_PROP_SECURITY_VALID); 4215 } 4216 4217 /* consist with other drivers, allow untagged packet when vlan filter on */ 4218 if (on) 4219 ret = ice_vsi_add_vlan_zero(vsi); 4220 else 4221 ret = ice_vsi_del_vlan_zero(vsi); 4222 4223 return 0; 4224 } 4225 4226 /* Manage VLAN stripping for the VSI for Rx */ 4227 static int 4228 ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) 4229 { 4230 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4231 struct ice_vsi_ctx ctxt; 4232 enum ice_status status; 4233 int err = 0; 4234 4235 /* do not allow modifying VLAN stripping when a port VLAN is configured 4236 * on this VSI 4237 */ 4238 if (vsi->info.port_based_inner_vlan) 4239 return 0; 4240 4241 memset(&ctxt, 0, sizeof(ctxt)); 4242 4243 if (ena) 4244 /* Strip VLAN tag from Rx packet and put it in the desc */ 4245 ctxt.info.inner_vlan_flags = 4246 ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH; 4247 else 4248 /* Disable stripping. Leave tag in packet */ 4249 ctxt.info.inner_vlan_flags = 4250 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING; 4251 4252 /* Allow all packets untagged/tagged */ 4253 ctxt.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL; 4254 4255 ctxt.info.valid_sections = rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID); 4256 4257 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); 4258 if (status) { 4259 PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan stripping", 4260 ena ? 
"enable" : "disable"); 4261 err = -EIO; 4262 } else { 4263 vsi->info.inner_vlan_flags = ctxt.info.inner_vlan_flags; 4264 } 4265 4266 return err; 4267 } 4268 4269 static int 4270 ice_vsi_ena_inner_stripping(struct ice_vsi *vsi) 4271 { 4272 return ice_vsi_manage_vlan_stripping(vsi, true); 4273 } 4274 4275 static int 4276 ice_vsi_dis_inner_stripping(struct ice_vsi *vsi) 4277 { 4278 return ice_vsi_manage_vlan_stripping(vsi, false); 4279 } 4280 4281 static int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi) 4282 { 4283 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4284 struct ice_vsi_ctx ctxt; 4285 enum ice_status status; 4286 int err = 0; 4287 4288 /* do not allow modifying VLAN stripping when a port VLAN is configured 4289 * on this VSI 4290 */ 4291 if (vsi->info.port_based_outer_vlan) 4292 return 0; 4293 4294 memset(&ctxt, 0, sizeof(ctxt)); 4295 4296 ctxt.info.valid_sections = 4297 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID); 4298 /* clear current outer VLAN strip settings */ 4299 ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags & 4300 ~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M); 4301 ctxt.info.outer_vlan_flags |= 4302 (ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH << 4303 ICE_AQ_VSI_OUTER_VLAN_EMODE_S) | 4304 (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 << 4305 ICE_AQ_VSI_OUTER_TAG_TYPE_S); 4306 4307 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); 4308 if (status) { 4309 PMD_DRV_LOG(ERR, "Update VSI failed to enable outer VLAN stripping"); 4310 err = -EIO; 4311 } else { 4312 vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags; 4313 } 4314 4315 return err; 4316 } 4317 4318 static int 4319 ice_vsi_dis_outer_stripping(struct ice_vsi *vsi) 4320 { 4321 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4322 struct ice_vsi_ctx ctxt; 4323 enum ice_status status; 4324 int err = 0; 4325 4326 if (vsi->info.port_based_outer_vlan) 4327 return 0; 4328 4329 memset(&ctxt, 0, sizeof(ctxt)); 4330 4331 ctxt.info.valid_sections = 4332 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID); 4333 /* clear current outer VLAN strip settings */ 4334 ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags & 4335 ~ICE_AQ_VSI_OUTER_VLAN_EMODE_M; 4336 ctxt.info.outer_vlan_flags |= ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING << 4337 ICE_AQ_VSI_OUTER_VLAN_EMODE_S; 4338 4339 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); 4340 if (status) { 4341 PMD_DRV_LOG(ERR, "Update VSI failed to disable outer VLAN stripping"); 4342 err = -EIO; 4343 } else { 4344 vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags; 4345 } 4346 4347 return err; 4348 } 4349 4350 static int 4351 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool ena) 4352 { 4353 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4354 int ret; 4355 4356 if (ice_is_dvm_ena(hw)) { 4357 if (ena) 4358 ret = ice_vsi_ena_outer_stripping(vsi); 4359 else 4360 ret = ice_vsi_dis_outer_stripping(vsi); 4361 } else { 4362 if (ena) 4363 ret = ice_vsi_ena_inner_stripping(vsi); 4364 else 4365 ret = ice_vsi_dis_inner_stripping(vsi); 4366 } 4367 4368 return ret; 4369 } 4370 4371 static int 4372 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask) 4373 { 4374 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4375 struct ice_vsi *vsi = pf->main_vsi; 4376 struct rte_eth_rxmode *rxmode; 4377 4378 rxmode = &dev->data->dev_conf.rxmode; 4379 if (mask & RTE_ETH_VLAN_FILTER_MASK) { 4380 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 4381 ice_vsi_config_vlan_filter(vsi, true); 4382 else 4383 ice_vsi_config_vlan_filter(vsi, false); 4384 } 4385 4386 if (mask & 
RTE_ETH_VLAN_STRIP_MASK) {
4387 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
4388 ice_vsi_config_vlan_stripping(vsi, true);
4389 else
4390 ice_vsi_config_vlan_stripping(vsi, false);
4391 }
4392
4393 return 0;
4394 }
4395
4396 static int
4397 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4398 {
4399 struct ice_aq_get_set_rss_lut_params lut_params;
4400 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
4401 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4402 int ret;
4403
4404 if (!lut)
4405 return -EINVAL;
4406
4407 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4408 lut_params.vsi_handle = vsi->idx;
4409 lut_params.lut_size = lut_size;
4410 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4411 lut_params.lut = lut;
4412 lut_params.global_lut_id = 0;
4413 ret = ice_aq_get_rss_lut(hw, &lut_params);
4414 if (ret) {
4415 PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4416 return -EINVAL;
4417 }
4418 } else {
4419 uint64_t *lut_dw = (uint64_t *)lut;
4420 uint16_t i, lut_size_dw = lut_size / 4;
4421
4422 for (i = 0; i < lut_size_dw; i++)
4423 lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
4424 }
4425
4426 return 0;
4427 }
4428
4429 static int
4430 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4431 {
4432 struct ice_aq_get_set_rss_lut_params lut_params;
4433 struct ice_pf *pf;
4434 struct ice_hw *hw;
4435 int ret;
4436
4437 if (!vsi || !lut)
4438 return -EINVAL;
4439
4440 pf = ICE_VSI_TO_PF(vsi);
4441 hw = ICE_VSI_TO_HW(vsi);
4442
4443 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4444 lut_params.vsi_handle = vsi->idx;
4445 lut_params.lut_size = lut_size;
4446 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4447 lut_params.lut = lut;
4448 lut_params.global_lut_id = 0;
4449 ret = ice_aq_set_rss_lut(hw, &lut_params);
4450 if (ret) {
4451 PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4452 return -EINVAL;
4453 }
4454 } else {
4455 uint64_t *lut_dw = (uint64_t *)lut;
4456 uint16_t i, lut_size_dw = lut_size / 4;
4457
4458 for (i = 0; i < lut_size_dw; i++)
4459 ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
4460
4461 ice_flush(hw);
4462 }
4463
4464 return 0;
4465 }
4466
4467 static int
4468 ice_rss_reta_update(struct rte_eth_dev *dev,
4469 struct rte_eth_rss_reta_entry64 *reta_conf,
4470 uint16_t reta_size)
4471 {
4472 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4473 uint16_t i, lut_size = pf->hash_lut_size;
4474 uint16_t idx, shift;
4475 uint8_t *lut;
4476 int ret;
4477
4478 if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
4479 reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
4480 reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
4481 PMD_DRV_LOG(ERR,
4482 "The size of the configured hash lookup table (%d) "
4483 "doesn't match what the hardware can support "
4484 "(128, 512, 2048)",
4485 reta_size);
4486 return -EINVAL;
4487 }
4488
4489 /* The current LUT size MUST be used to read back the RSS lookup table,
4490 * otherwise it will fail with a -100 error code.
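 *
 * Added clarification: each rte_eth_rss_reta_entry64 passed in by the
 * application covers 64 LUT entries; only the entries whose bit is set in
 * that group's mask are overwritten in the loop below, the rest keep the
 * value just read back from hardware.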
4491 */
4492 lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0);
4493 if (!lut) {
4494 PMD_DRV_LOG(ERR, "No memory can be allocated");
4495 return -ENOMEM;
4496 }
4497 ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size);
4498 if (ret)
4499 goto out;
4500
4501 for (i = 0; i < reta_size; i++) {
4502 idx = i / RTE_ETH_RETA_GROUP_SIZE;
4503 shift = i % RTE_ETH_RETA_GROUP_SIZE;
4504 if (reta_conf[idx].mask & (1ULL << shift))
4505 lut[i] = reta_conf[idx].reta[shift];
4506 }
4507 ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
4508 if (ret == 0 && lut_size != reta_size) {
4509 PMD_DRV_LOG(INFO,
4510 "The size of hash lookup table is changed from (%d) to (%d)",
4511 lut_size, reta_size);
4512 pf->hash_lut_size = reta_size;
4513 }
4514
4515 out:
4516 rte_free(lut);
4517
4518 return ret;
4519 }
4520
4521 static int
4522 ice_rss_reta_query(struct rte_eth_dev *dev,
4523 struct rte_eth_rss_reta_entry64 *reta_conf,
4524 uint16_t reta_size)
4525 {
4526 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4527 uint16_t i, lut_size = pf->hash_lut_size;
4528 uint16_t idx, shift;
4529 uint8_t *lut;
4530 int ret;
4531
4532 if (reta_size != lut_size) {
4533 PMD_DRV_LOG(ERR,
4534 "The size of the configured hash lookup table (%d) "
4535 "doesn't match what the hardware can "
4536 "support (%d)",
4537 reta_size, lut_size);
4538 return -EINVAL;
4539 }
4540
4541 lut = rte_zmalloc(NULL, reta_size, 0);
4542 if (!lut) {
4543 PMD_DRV_LOG(ERR, "No memory can be allocated");
4544 return -ENOMEM;
4545 }
4546
4547 ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
4548 if (ret)
4549 goto out;
4550
4551 for (i = 0; i < reta_size; i++) {
4552 idx = i / RTE_ETH_RETA_GROUP_SIZE;
4553 shift = i % RTE_ETH_RETA_GROUP_SIZE;
4554 if (reta_conf[idx].mask & (1ULL << shift))
4555 reta_conf[idx].reta[shift] = lut[i];
4556 }
4557
4558 out:
4559 rte_free(lut);
4560
4561 return ret;
4562 }
4563
4564 static int
4565 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
4566 {
4567 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4568 int ret = 0;
4569
4570 if (!key || key_len == 0) {
4571 PMD_DRV_LOG(DEBUG, "No key to be configured");
4572 return 0;
4573 } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
4574 sizeof(uint32_t)) {
4575 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
4576 return -EINVAL;
4577 }
4578
4579 struct ice_aqc_get_set_rss_keys *key_dw =
4580 (struct ice_aqc_get_set_rss_keys *)key;
4581
4582 ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
4583 if (ret) {
4584 PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
4585 ret = -EINVAL;
4586 }
4587
4588 return ret;
4589 }
4590
4591 static int
4592 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
4593 {
4594 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4595 int ret;
4596
4597 if (!key || !key_len)
4598 return -EINVAL;
4599
4600 ret = ice_aq_get_rss_key
4601 (hw, vsi->idx,
4602 (struct ice_aqc_get_set_rss_keys *)key);
4603 if (ret) {
4604 PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
4605 return -EINVAL;
4606 }
4607 *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
4608
4609 return 0;
4610 }
4611
4612 static int
4613 ice_rss_hash_update(struct rte_eth_dev *dev,
4614 struct rte_eth_rss_conf *rss_conf)
4615 {
4616 enum ice_status status = ICE_SUCCESS;
4617 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4618 struct ice_vsi *vsi = pf->main_vsi;
4619
4620 /* set hash key */
4621 status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
4622 if (status)
4623 return status;
4624
4625
if (rss_conf->rss_hf == 0) { 4626 pf->rss_hf = 0; 4627 return 0; 4628 } 4629 4630 /* RSS hash configuration */ 4631 ice_rss_hash_set(pf, rss_conf->rss_hf); 4632 4633 return 0; 4634 } 4635 4636 static int 4637 ice_rss_hash_conf_get(struct rte_eth_dev *dev, 4638 struct rte_eth_rss_conf *rss_conf) 4639 { 4640 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4641 struct ice_vsi *vsi = pf->main_vsi; 4642 4643 ice_get_rss_key(vsi, rss_conf->rss_key, 4644 &rss_conf->rss_key_len); 4645 4646 rss_conf->rss_hf = pf->rss_hf; 4647 return 0; 4648 } 4649 4650 static int 4651 ice_promisc_enable(struct rte_eth_dev *dev) 4652 { 4653 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4654 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4655 struct ice_vsi *vsi = pf->main_vsi; 4656 enum ice_status status; 4657 uint8_t pmask; 4658 int ret = 0; 4659 4660 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX | 4661 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX; 4662 4663 status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0); 4664 switch (status) { 4665 case ICE_ERR_ALREADY_EXISTS: 4666 PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled"); 4667 case ICE_SUCCESS: 4668 break; 4669 default: 4670 PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status); 4671 ret = -EAGAIN; 4672 } 4673 4674 return ret; 4675 } 4676 4677 static int 4678 ice_promisc_disable(struct rte_eth_dev *dev) 4679 { 4680 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4681 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4682 struct ice_vsi *vsi = pf->main_vsi; 4683 enum ice_status status; 4684 uint8_t pmask; 4685 int ret = 0; 4686 4687 if (dev->data->all_multicast == 1) 4688 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX; 4689 else 4690 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX | 4691 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX; 4692 4693 status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0); 4694 if (status != ICE_SUCCESS) { 4695 PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status); 4696 ret = -EAGAIN; 4697 } 4698 4699 return ret; 4700 } 4701 4702 static int 4703 ice_allmulti_enable(struct rte_eth_dev *dev) 4704 { 4705 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4706 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4707 struct ice_vsi *vsi = pf->main_vsi; 4708 enum ice_status status; 4709 uint8_t pmask; 4710 int ret = 0; 4711 4712 pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX; 4713 4714 status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0); 4715 4716 switch (status) { 4717 case ICE_ERR_ALREADY_EXISTS: 4718 PMD_DRV_LOG(DEBUG, "Allmulti has already been enabled"); 4719 case ICE_SUCCESS: 4720 break; 4721 default: 4722 PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status); 4723 ret = -EAGAIN; 4724 } 4725 4726 return ret; 4727 } 4728 4729 static int 4730 ice_allmulti_disable(struct rte_eth_dev *dev) 4731 { 4732 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4733 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4734 struct ice_vsi *vsi = pf->main_vsi; 4735 enum ice_status status; 4736 uint8_t pmask; 4737 int ret = 0; 4738 4739 if (dev->data->promiscuous == 1) 4740 return 0; /* must remain in all_multicast mode */ 4741 4742 pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX; 4743 4744 status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0); 4745 if (status != ICE_SUCCESS) { 4746 PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status); 4747 
ret = -EAGAIN; 4748 } 4749 4750 return ret; 4751 } 4752 4753 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev, 4754 uint16_t queue_id) 4755 { 4756 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); 4757 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 4758 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4759 uint32_t val; 4760 uint16_t msix_intr; 4761 4762 msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id); 4763 4764 val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | 4765 GLINT_DYN_CTL_ITR_INDX_M; 4766 val &= ~GLINT_DYN_CTL_WB_ON_ITR_M; 4767 4768 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val); 4769 rte_intr_ack(pci_dev->intr_handle); 4770 4771 return 0; 4772 } 4773 4774 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev, 4775 uint16_t queue_id) 4776 { 4777 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); 4778 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 4779 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4780 uint16_t msix_intr; 4781 4782 msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id); 4783 4784 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M); 4785 4786 return 0; 4787 } 4788 4789 static int 4790 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 4791 { 4792 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4793 u8 ver, patch; 4794 u16 build; 4795 int ret; 4796 4797 ver = hw->flash.orom.major; 4798 patch = hw->flash.orom.patch; 4799 build = hw->flash.orom.build; 4800 4801 ret = snprintf(fw_version, fw_size, 4802 "%x.%02x 0x%08x %d.%d.%d", 4803 hw->flash.nvm.major, 4804 hw->flash.nvm.minor, 4805 hw->flash.nvm.eetrack, 4806 ver, build, patch); 4807 if (ret < 0) 4808 return -EINVAL; 4809 4810 /* add the size of '\0' */ 4811 ret += 1; 4812 if (fw_size < (size_t)ret) 4813 return ret; 4814 else 4815 return 0; 4816 } 4817 4818 static int 4819 ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info) 4820 { 4821 struct ice_hw *hw; 4822 struct ice_vsi_ctx ctxt; 4823 uint8_t vlan_flags = 0; 4824 int ret; 4825 4826 if (!vsi || !info) { 4827 PMD_DRV_LOG(ERR, "invalid parameters"); 4828 return -EINVAL; 4829 } 4830 4831 if (info->on) { 4832 vsi->info.port_based_inner_vlan = info->config.pvid; 4833 /** 4834 * If insert pvid is enabled, only tagged pkts are 4835 * allowed to be sent out. 
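 * (Added note: when insertion is disabled, the reject flags handled in the
 * else branch below decide whether tagged and/or untagged frames are still
 * accepted on Tx.)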
4836 */ 4837 vlan_flags = ICE_AQ_VSI_INNER_VLAN_INSERT_PVID | 4838 ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED; 4839 } else { 4840 vsi->info.port_based_inner_vlan = 0; 4841 if (info->config.reject.tagged == 0) 4842 vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTTAGGED; 4843 4844 if (info->config.reject.untagged == 0) 4845 vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED; 4846 } 4847 vsi->info.inner_vlan_flags &= ~(ICE_AQ_VSI_INNER_VLAN_INSERT_PVID | 4848 ICE_AQ_VSI_INNER_VLAN_EMODE_M); 4849 vsi->info.inner_vlan_flags |= vlan_flags; 4850 memset(&ctxt, 0, sizeof(ctxt)); 4851 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 4852 ctxt.info.valid_sections = 4853 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID); 4854 ctxt.vsi_num = vsi->vsi_id; 4855 4856 hw = ICE_VSI_TO_HW(vsi); 4857 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); 4858 if (ret != ICE_SUCCESS) { 4859 PMD_DRV_LOG(ERR, 4860 "update VSI for VLAN insert failed, err %d", 4861 ret); 4862 return -EINVAL; 4863 } 4864 4865 vsi->info.valid_sections |= 4866 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID); 4867 4868 return ret; 4869 } 4870 4871 static int 4872 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on) 4873 { 4874 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 4875 struct ice_vsi *vsi = pf->main_vsi; 4876 struct rte_eth_dev_data *data = pf->dev_data; 4877 struct ice_vsi_vlan_pvid_info info; 4878 int ret; 4879 4880 memset(&info, 0, sizeof(info)); 4881 info.on = on; 4882 if (info.on) { 4883 info.config.pvid = pvid; 4884 } else { 4885 info.config.reject.tagged = 4886 data->dev_conf.txmode.hw_vlan_reject_tagged; 4887 info.config.reject.untagged = 4888 data->dev_conf.txmode.hw_vlan_reject_untagged; 4889 } 4890 4891 ret = ice_vsi_vlan_pvid_set(vsi, &info); 4892 if (ret < 0) { 4893 PMD_DRV_LOG(ERR, "Failed to set pvid."); 4894 return -EINVAL; 4895 } 4896 4897 return 0; 4898 } 4899 4900 static int 4901 ice_get_eeprom_length(struct rte_eth_dev *dev) 4902 { 4903 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4904 4905 return hw->flash.flash_size; 4906 } 4907 4908 static int 4909 ice_get_eeprom(struct rte_eth_dev *dev, 4910 struct rte_dev_eeprom_info *eeprom) 4911 { 4912 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4913 enum ice_status status = ICE_SUCCESS; 4914 uint8_t *data = eeprom->data; 4915 4916 eeprom->magic = hw->vendor_id | (hw->device_id << 16); 4917 4918 status = ice_acquire_nvm(hw, ICE_RES_READ); 4919 if (status) { 4920 PMD_DRV_LOG(ERR, "acquire nvm failed."); 4921 return -EIO; 4922 } 4923 4924 status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->length, 4925 data, false); 4926 4927 ice_release_nvm(hw); 4928 4929 if (status) { 4930 PMD_DRV_LOG(ERR, "EEPROM read failed."); 4931 return -EIO; 4932 } 4933 4934 return 0; 4935 } 4936 4937 static void 4938 ice_stat_update_32(struct ice_hw *hw, 4939 uint32_t reg, 4940 bool offset_loaded, 4941 uint64_t *offset, 4942 uint64_t *stat) 4943 { 4944 uint64_t new_data; 4945 4946 new_data = (uint64_t)ICE_READ_REG(hw, reg); 4947 if (!offset_loaded) 4948 *offset = new_data; 4949 4950 if (new_data >= *offset) 4951 *stat = (uint64_t)(new_data - *offset); 4952 else 4953 *stat = (uint64_t)((new_data + 4954 ((uint64_t)1 << ICE_32_BIT_WIDTH)) 4955 - *offset); 4956 } 4957 4958 static void 4959 ice_stat_update_40(struct ice_hw *hw, 4960 uint32_t hireg, 4961 uint32_t loreg, 4962 bool offset_loaded, 4963 uint64_t *offset, 4964 uint64_t *stat) 4965 { 4966 uint64_t new_data; 4967 4968 new_data = 
(uint64_t)ICE_READ_REG(hw, loreg); 4969 new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) << 4970 ICE_32_BIT_WIDTH; 4971 4972 if (!offset_loaded) 4973 *offset = new_data; 4974 4975 if (new_data >= *offset) 4976 *stat = new_data - *offset; 4977 else 4978 *stat = (uint64_t)((new_data + 4979 ((uint64_t)1 << ICE_40_BIT_WIDTH)) - 4980 *offset); 4981 4982 *stat &= ICE_40_BIT_MASK; 4983 } 4984 4985 /* Get all the statistics of a VSI */ 4986 static void 4987 ice_update_vsi_stats(struct ice_vsi *vsi) 4988 { 4989 struct ice_eth_stats *oes = &vsi->eth_stats_offset; 4990 struct ice_eth_stats *nes = &vsi->eth_stats; 4991 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 4992 int idx = rte_le_to_cpu_16(vsi->vsi_id); 4993 4994 ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx), 4995 vsi->offset_loaded, &oes->rx_bytes, 4996 &nes->rx_bytes); 4997 ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx), 4998 vsi->offset_loaded, &oes->rx_unicast, 4999 &nes->rx_unicast); 5000 ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx), 5001 vsi->offset_loaded, &oes->rx_multicast, 5002 &nes->rx_multicast); 5003 ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx), 5004 vsi->offset_loaded, &oes->rx_broadcast, 5005 &nes->rx_broadcast); 5006 /* enlarge the limitation when rx_bytes overflowed */ 5007 if (vsi->offset_loaded) { 5008 if (ICE_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes) 5009 nes->rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH; 5010 nes->rx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_rx_bytes); 5011 } 5012 vsi->old_rx_bytes = nes->rx_bytes; 5013 /* exclude CRC bytes */ 5014 nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast + 5015 nes->rx_broadcast) * RTE_ETHER_CRC_LEN; 5016 5017 ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded, 5018 &oes->rx_discards, &nes->rx_discards); 5019 /* GLV_REPC not supported */ 5020 /* GLV_RMPC not supported */ 5021 ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded, 5022 &oes->rx_unknown_protocol, 5023 &nes->rx_unknown_protocol); 5024 ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx), 5025 vsi->offset_loaded, &oes->tx_bytes, 5026 &nes->tx_bytes); 5027 ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx), 5028 vsi->offset_loaded, &oes->tx_unicast, 5029 &nes->tx_unicast); 5030 ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx), 5031 vsi->offset_loaded, &oes->tx_multicast, 5032 &nes->tx_multicast); 5033 ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx), 5034 vsi->offset_loaded, &oes->tx_broadcast, 5035 &nes->tx_broadcast); 5036 /* GLV_TDPC not supported */ 5037 ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded, 5038 &oes->tx_errors, &nes->tx_errors); 5039 /* enlarge the limitation when tx_bytes overflowed */ 5040 if (vsi->offset_loaded) { 5041 if (ICE_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes) 5042 nes->tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH; 5043 nes->tx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_tx_bytes); 5044 } 5045 vsi->old_tx_bytes = nes->tx_bytes; 5046 vsi->offset_loaded = true; 5047 5048 PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************", 5049 vsi->vsi_id); 5050 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes); 5051 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast); 5052 PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast); 5053 PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast); 5054 PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards); 5055 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"", 5056 nes->rx_unknown_protocol); 5057 PMD_DRV_LOG(DEBUG, 
"tx_bytes: %"PRIu64"", nes->tx_bytes); 5058 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast); 5059 PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast); 5060 PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast); 5061 PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards); 5062 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors); 5063 PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************", 5064 vsi->vsi_id); 5065 } 5066 5067 static void 5068 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw) 5069 { 5070 struct ice_hw_port_stats *ns = &pf->stats; /* new stats */ 5071 struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */ 5072 5073 /* Get statistics of struct ice_eth_stats */ 5074 ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport), 5075 GLPRT_GORCL(hw->port_info->lport), 5076 pf->offset_loaded, &os->eth.rx_bytes, 5077 &ns->eth.rx_bytes); 5078 ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport), 5079 GLPRT_UPRCL(hw->port_info->lport), 5080 pf->offset_loaded, &os->eth.rx_unicast, 5081 &ns->eth.rx_unicast); 5082 ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport), 5083 GLPRT_MPRCL(hw->port_info->lport), 5084 pf->offset_loaded, &os->eth.rx_multicast, 5085 &ns->eth.rx_multicast); 5086 ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport), 5087 GLPRT_BPRCL(hw->port_info->lport), 5088 pf->offset_loaded, &os->eth.rx_broadcast, 5089 &ns->eth.rx_broadcast); 5090 ice_stat_update_32(hw, PRTRPB_RDPC, 5091 pf->offset_loaded, &os->eth.rx_discards, 5092 &ns->eth.rx_discards); 5093 /* enlarge the limitation when rx_bytes overflowed */ 5094 if (pf->offset_loaded) { 5095 if (ICE_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes) 5096 ns->eth.rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH; 5097 ns->eth.rx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_rx_bytes); 5098 } 5099 pf->old_rx_bytes = ns->eth.rx_bytes; 5100 5101 /* Workaround: CRC size should not be included in byte statistics, 5102 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx 5103 * packet. 
5104 */ 5105 ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast + 5106 ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN; 5107 5108 /* GLPRT_REPC not supported */ 5109 /* GLPRT_RMPC not supported */ 5110 ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport), 5111 pf->offset_loaded, 5112 &os->eth.rx_unknown_protocol, 5113 &ns->eth.rx_unknown_protocol); 5114 ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport), 5115 GLPRT_GOTCL(hw->port_info->lport), 5116 pf->offset_loaded, &os->eth.tx_bytes, 5117 &ns->eth.tx_bytes); 5118 ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport), 5119 GLPRT_UPTCL(hw->port_info->lport), 5120 pf->offset_loaded, &os->eth.tx_unicast, 5121 &ns->eth.tx_unicast); 5122 ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport), 5123 GLPRT_MPTCL(hw->port_info->lport), 5124 pf->offset_loaded, &os->eth.tx_multicast, 5125 &ns->eth.tx_multicast); 5126 ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport), 5127 GLPRT_BPTCL(hw->port_info->lport), 5128 pf->offset_loaded, &os->eth.tx_broadcast, 5129 &ns->eth.tx_broadcast); 5130 /* enlarge the limitation when tx_bytes overflowed */ 5131 if (pf->offset_loaded) { 5132 if (ICE_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes) 5133 ns->eth.tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH; 5134 ns->eth.tx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_tx_bytes); 5135 } 5136 pf->old_tx_bytes = ns->eth.tx_bytes; 5137 ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast + 5138 ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN; 5139 5140 /* GLPRT_TEPC not supported */ 5141 5142 /* additional port specific stats */ 5143 ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport), 5144 pf->offset_loaded, &os->tx_dropped_link_down, 5145 &ns->tx_dropped_link_down); 5146 ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport), 5147 pf->offset_loaded, &os->crc_errors, 5148 &ns->crc_errors); 5149 ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport), 5150 pf->offset_loaded, &os->illegal_bytes, 5151 &ns->illegal_bytes); 5152 /* GLPRT_ERRBC not supported */ 5153 ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport), 5154 pf->offset_loaded, &os->mac_local_faults, 5155 &ns->mac_local_faults); 5156 ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport), 5157 pf->offset_loaded, &os->mac_remote_faults, 5158 &ns->mac_remote_faults); 5159 5160 ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport), 5161 pf->offset_loaded, &os->rx_len_errors, 5162 &ns->rx_len_errors); 5163 5164 ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport), 5165 pf->offset_loaded, &os->link_xon_rx, 5166 &ns->link_xon_rx); 5167 ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport), 5168 pf->offset_loaded, &os->link_xoff_rx, 5169 &ns->link_xoff_rx); 5170 ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport), 5171 pf->offset_loaded, &os->link_xon_tx, 5172 &ns->link_xon_tx); 5173 ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport), 5174 pf->offset_loaded, &os->link_xoff_tx, 5175 &ns->link_xoff_tx); 5176 ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport), 5177 GLPRT_PRC64L(hw->port_info->lport), 5178 pf->offset_loaded, &os->rx_size_64, 5179 &ns->rx_size_64); 5180 ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport), 5181 GLPRT_PRC127L(hw->port_info->lport), 5182 pf->offset_loaded, &os->rx_size_127, 5183 &ns->rx_size_127); 5184 ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport), 5185 GLPRT_PRC255L(hw->port_info->lport), 5186 pf->offset_loaded, &os->rx_size_255, 5187 &ns->rx_size_255); 5188 ice_stat_update_40(hw, 
GLPRT_PRC511H(hw->port_info->lport), 5189 GLPRT_PRC511L(hw->port_info->lport), 5190 pf->offset_loaded, &os->rx_size_511, 5191 &ns->rx_size_511); 5192 ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport), 5193 GLPRT_PRC1023L(hw->port_info->lport), 5194 pf->offset_loaded, &os->rx_size_1023, 5195 &ns->rx_size_1023); 5196 ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport), 5197 GLPRT_PRC1522L(hw->port_info->lport), 5198 pf->offset_loaded, &os->rx_size_1522, 5199 &ns->rx_size_1522); 5200 ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport), 5201 GLPRT_PRC9522L(hw->port_info->lport), 5202 pf->offset_loaded, &os->rx_size_big, 5203 &ns->rx_size_big); 5204 ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport), 5205 pf->offset_loaded, &os->rx_undersize, 5206 &ns->rx_undersize); 5207 ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport), 5208 pf->offset_loaded, &os->rx_fragments, 5209 &ns->rx_fragments); 5210 ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport), 5211 pf->offset_loaded, &os->rx_oversize, 5212 &ns->rx_oversize); 5213 ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport), 5214 pf->offset_loaded, &os->rx_jabber, 5215 &ns->rx_jabber); 5216 ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport), 5217 GLPRT_PTC64L(hw->port_info->lport), 5218 pf->offset_loaded, &os->tx_size_64, 5219 &ns->tx_size_64); 5220 ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport), 5221 GLPRT_PTC127L(hw->port_info->lport), 5222 pf->offset_loaded, &os->tx_size_127, 5223 &ns->tx_size_127); 5224 ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport), 5225 GLPRT_PTC255L(hw->port_info->lport), 5226 pf->offset_loaded, &os->tx_size_255, 5227 &ns->tx_size_255); 5228 ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport), 5229 GLPRT_PTC511L(hw->port_info->lport), 5230 pf->offset_loaded, &os->tx_size_511, 5231 &ns->tx_size_511); 5232 ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport), 5233 GLPRT_PTC1023L(hw->port_info->lport), 5234 pf->offset_loaded, &os->tx_size_1023, 5235 &ns->tx_size_1023); 5236 ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport), 5237 GLPRT_PTC1522L(hw->port_info->lport), 5238 pf->offset_loaded, &os->tx_size_1522, 5239 &ns->tx_size_1522); 5240 ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport), 5241 GLPRT_PTC9522L(hw->port_info->lport), 5242 pf->offset_loaded, &os->tx_size_big, 5243 &ns->tx_size_big); 5244 5245 /* GLPRT_MSPDC not supported */ 5246 /* GLPRT_XEC not supported */ 5247 5248 pf->offset_loaded = true; 5249 5250 if (pf->main_vsi) 5251 ice_update_vsi_stats(pf->main_vsi); 5252 } 5253 5254 /* Get all statistics of a port */ 5255 static int 5256 ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 5257 { 5258 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5259 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5260 struct ice_hw_port_stats *ns = &pf->stats; /* new stats */ 5261 5262 /* call read registers - updates values, now write them to struct */ 5263 ice_read_stats_registers(pf, hw); 5264 5265 stats->ipackets = pf->main_vsi->eth_stats.rx_unicast + 5266 pf->main_vsi->eth_stats.rx_multicast + 5267 pf->main_vsi->eth_stats.rx_broadcast - 5268 pf->main_vsi->eth_stats.rx_discards; 5269 stats->opackets = ns->eth.tx_unicast + 5270 ns->eth.tx_multicast + 5271 ns->eth.tx_broadcast; 5272 stats->ibytes = pf->main_vsi->eth_stats.rx_bytes; 5273 stats->obytes = ns->eth.tx_bytes; 5274 stats->oerrors = ns->eth.tx_errors + 5275 pf->main_vsi->eth_stats.tx_errors; 5276 5277 /* Rx Errors */ 
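/* Added clarification: imissed accumulates the port-level and VSI-level
 * discard counters read above, while ierrors sums the MAC-level error
 * counters (CRC, undersize, oversize, fragments and jabber).
 */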
5278 stats->imissed = ns->eth.rx_discards + 5279 pf->main_vsi->eth_stats.rx_discards; 5280 stats->ierrors = ns->crc_errors + 5281 ns->rx_undersize + 5282 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber; 5283 5284 PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************"); 5285 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes); 5286 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast); 5287 PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast); 5288 PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast); 5289 PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards); 5290 PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"", 5291 pf->main_vsi->eth_stats.rx_discards); 5292 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"", 5293 ns->eth.rx_unknown_protocol); 5294 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes); 5295 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast); 5296 PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast); 5297 PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast); 5298 PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards); 5299 PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"", 5300 pf->main_vsi->eth_stats.tx_discards); 5301 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors); 5302 5303 PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"", 5304 ns->tx_dropped_link_down); 5305 PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors); 5306 PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"", 5307 ns->illegal_bytes); 5308 PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes); 5309 PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"", 5310 ns->mac_local_faults); 5311 PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"", 5312 ns->mac_remote_faults); 5313 PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx); 5314 PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx); 5315 PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx); 5316 PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx); 5317 PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64); 5318 PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127); 5319 PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255); 5320 PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511); 5321 PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023); 5322 PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522); 5323 PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big); 5324 PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize); 5325 PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments); 5326 PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize); 5327 PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber); 5328 PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64); 5329 PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127); 5330 PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255); 5331 PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511); 5332 PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023); 5333 PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522); 5334 PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big); 5335 PMD_DRV_LOG(DEBUG, "rx_len_errors: %"PRIu64"", ns->rx_len_errors); 5336 PMD_DRV_LOG(DEBUG, "************* PF stats end ****************"); 5337 return 0; 5338 } 5339 5340 /* Reset the statistics */ 5341 static int 5342 
ice_stats_reset(struct rte_eth_dev *dev) 5343 { 5344 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5345 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5346 5347 /* Mark PF and VSI stats to update the offset, aka "reset" */ 5348 pf->offset_loaded = false; 5349 if (pf->main_vsi) 5350 pf->main_vsi->offset_loaded = false; 5351 5352 /* read the stats, reading current register values into offset */ 5353 ice_read_stats_registers(pf, hw); 5354 5355 return 0; 5356 } 5357 5358 static uint32_t 5359 ice_xstats_calc_num(void) 5360 { 5361 uint32_t num; 5362 5363 num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS; 5364 5365 return num; 5366 } 5367 5368 static int 5369 ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 5370 unsigned int n) 5371 { 5372 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5373 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5374 unsigned int i; 5375 unsigned int count; 5376 struct ice_hw_port_stats *hw_stats = &pf->stats; 5377 5378 count = ice_xstats_calc_num(); 5379 if (n < count) 5380 return count; 5381 5382 ice_read_stats_registers(pf, hw); 5383 5384 if (!xstats) 5385 return 0; 5386 5387 count = 0; 5388 5389 /* Get stats from ice_eth_stats struct */ 5390 for (i = 0; i < ICE_NB_ETH_XSTATS; i++) { 5391 xstats[count].value = 5392 *(uint64_t *)((char *)&hw_stats->eth + 5393 ice_stats_strings[i].offset); 5394 xstats[count].id = count; 5395 count++; 5396 } 5397 5398 /* Get individiual stats from ice_hw_port struct */ 5399 for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) { 5400 xstats[count].value = 5401 *(uint64_t *)((char *)hw_stats + 5402 ice_hw_port_strings[i].offset); 5403 xstats[count].id = count; 5404 count++; 5405 } 5406 5407 return count; 5408 } 5409 5410 static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 5411 struct rte_eth_xstat_name *xstats_names, 5412 __rte_unused unsigned int limit) 5413 { 5414 unsigned int count = 0; 5415 unsigned int i; 5416 5417 if (!xstats_names) 5418 return ice_xstats_calc_num(); 5419 5420 /* Note: limit checked in rte_eth_xstats_names() */ 5421 5422 /* Get stats from ice_eth_stats struct */ 5423 for (i = 0; i < ICE_NB_ETH_XSTATS; i++) { 5424 strlcpy(xstats_names[count].name, ice_stats_strings[i].name, 5425 sizeof(xstats_names[count].name)); 5426 count++; 5427 } 5428 5429 /* Get individiual stats from ice_hw_port struct */ 5430 for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) { 5431 strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name, 5432 sizeof(xstats_names[count].name)); 5433 count++; 5434 } 5435 5436 return count; 5437 } 5438 5439 static int 5440 ice_dev_flow_ops_get(struct rte_eth_dev *dev, 5441 const struct rte_flow_ops **ops) 5442 { 5443 if (!dev) 5444 return -EINVAL; 5445 5446 *ops = &ice_flow_ops; 5447 return 0; 5448 } 5449 5450 /* Add UDP tunneling port */ 5451 static int 5452 ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 5453 struct rte_eth_udp_tunnel *udp_tunnel) 5454 { 5455 int ret = 0; 5456 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5457 5458 if (udp_tunnel == NULL) 5459 return -EINVAL; 5460 5461 switch (udp_tunnel->prot_type) { 5462 case RTE_ETH_TUNNEL_TYPE_VXLAN: 5463 ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port); 5464 break; 5465 default: 5466 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 5467 ret = -EINVAL; 5468 break; 5469 } 5470 5471 return ret; 5472 } 5473 5474 /* Delete UDP tunneling port */ 5475 static int 5476 ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 5477 
struct rte_eth_udp_tunnel *udp_tunnel) 5478 { 5479 int ret = 0; 5480 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5481 5482 if (udp_tunnel == NULL) 5483 return -EINVAL; 5484 5485 switch (udp_tunnel->prot_type) { 5486 case RTE_ETH_TUNNEL_TYPE_VXLAN: 5487 ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0); 5488 break; 5489 default: 5490 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 5491 ret = -EINVAL; 5492 break; 5493 } 5494 5495 return ret; 5496 } 5497 5498 static int 5499 ice_timesync_enable(struct rte_eth_dev *dev) 5500 { 5501 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5502 struct ice_adapter *ad = 5503 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 5504 int ret; 5505 5506 if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads & 5507 RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { 5508 PMD_DRV_LOG(ERR, "Rx timestamp offload not configured"); 5509 return -1; 5510 } 5511 5512 if (hw->func_caps.ts_func_info.src_tmr_owned) { 5513 ret = ice_ptp_init_phc(hw); 5514 if (ret) { 5515 PMD_DRV_LOG(ERR, "Failed to initialize PHC"); 5516 return -1; 5517 } 5518 5519 ret = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810); 5520 if (ret) { 5521 PMD_DRV_LOG(ERR, 5522 "Failed to write PHC increment time value"); 5523 return -1; 5524 } 5525 } 5526 5527 /* Initialize cycle counters for system time/RX/TX timestamp */ 5528 memset(&ad->systime_tc, 0, sizeof(struct rte_timecounter)); 5529 memset(&ad->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 5530 memset(&ad->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 5531 5532 ad->systime_tc.cc_mask = ICE_CYCLECOUNTER_MASK; 5533 ad->systime_tc.cc_shift = 0; 5534 ad->systime_tc.nsec_mask = 0; 5535 5536 ad->rx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK; 5537 ad->rx_tstamp_tc.cc_shift = 0; 5538 ad->rx_tstamp_tc.nsec_mask = 0; 5539 5540 ad->tx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK; 5541 ad->tx_tstamp_tc.cc_shift = 0; 5542 ad->tx_tstamp_tc.nsec_mask = 0; 5543 5544 ad->ptp_ena = 1; 5545 5546 return 0; 5547 } 5548 5549 static int 5550 ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 5551 struct timespec *timestamp, uint32_t flags) 5552 { 5553 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5554 struct ice_adapter *ad = 5555 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 5556 struct ice_rx_queue *rxq; 5557 uint32_t ts_high; 5558 uint64_t ts_ns, ns; 5559 5560 rxq = dev->data->rx_queues[flags]; 5561 5562 ts_high = rxq->time_high; 5563 ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, ts_high); 5564 ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns); 5565 *timestamp = rte_ns_to_timespec(ns); 5566 5567 return 0; 5568 } 5569 5570 static int 5571 ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 5572 struct timespec *timestamp) 5573 { 5574 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5575 struct ice_adapter *ad = 5576 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 5577 uint8_t lport; 5578 uint64_t ts_ns, ns, tstamp; 5579 const uint64_t mask = 0xFFFFFFFF; 5580 int ret; 5581 5582 lport = hw->port_info->lport; 5583 5584 ret = ice_read_phy_tstamp(hw, lport, 0, &tstamp); 5585 if (ret) { 5586 PMD_DRV_LOG(ERR, "Failed to read phy timestamp"); 5587 return -1; 5588 } 5589 5590 ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, (tstamp >> 8) & mask); 5591 ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns); 5592 *timestamp = rte_ns_to_timespec(ns); 5593 5594 return 0; 5595 } 5596 5597 static int 5598 ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 5599 { 5600 
struct ice_adapter *ad = 5601 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 5602 5603 ad->systime_tc.nsec += delta; 5604 ad->rx_tstamp_tc.nsec += delta; 5605 ad->tx_tstamp_tc.nsec += delta; 5606 5607 return 0; 5608 } 5609 5610 static int 5611 ice_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 5612 { 5613 struct ice_adapter *ad = 5614 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 5615 uint64_t ns; 5616 5617 ns = rte_timespec_to_ns(ts); 5618 5619 ad->systime_tc.nsec = ns; 5620 ad->rx_tstamp_tc.nsec = ns; 5621 ad->tx_tstamp_tc.nsec = ns; 5622 5623 return 0; 5624 } 5625 5626 static int 5627 ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 5628 { 5629 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5630 struct ice_adapter *ad = 5631 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 5632 uint32_t hi, lo, lo2; 5633 uint64_t time, ns; 5634 5635 lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0)); 5636 hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0)); 5637 lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0)); 5638 5639 if (lo2 < lo) { 5640 lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0)); 5641 hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0)); 5642 } 5643 5644 time = ((uint64_t)hi << 32) | lo; 5645 ns = rte_timecounter_update(&ad->systime_tc, time); 5646 *ts = rte_ns_to_timespec(ns); 5647 5648 return 0; 5649 } 5650 5651 static int 5652 ice_timesync_disable(struct rte_eth_dev *dev) 5653 { 5654 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5655 struct ice_adapter *ad = 5656 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 5657 uint64_t val; 5658 uint8_t lport; 5659 5660 lport = hw->port_info->lport; 5661 5662 ice_clear_phy_tstamp(hw, lport, 0); 5663 5664 val = ICE_READ_REG(hw, GLTSYN_ENA(0)); 5665 val &= ~GLTSYN_ENA_TSYN_ENA_M; 5666 ICE_WRITE_REG(hw, GLTSYN_ENA(0), val); 5667 5668 ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(0), 0); 5669 ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(0), 0); 5670 5671 ad->ptp_ena = 0; 5672 5673 return 0; 5674 } 5675 5676 static int 5677 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 5678 struct rte_pci_device *pci_dev) 5679 { 5680 return rte_eth_dev_pci_generic_probe(pci_dev, 5681 sizeof(struct ice_adapter), 5682 ice_dev_init); 5683 } 5684 5685 static int 5686 ice_pci_remove(struct rte_pci_device *pci_dev) 5687 { 5688 return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit); 5689 } 5690 5691 static struct rte_pci_driver rte_ice_pmd = { 5692 .id_table = pci_id_ice_map, 5693 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 5694 .probe = ice_pci_probe, 5695 .remove = ice_pci_remove, 5696 }; 5697 5698 /** 5699 * Driver initialization routine. 5700 * Invoked once at EAL init time. 5701 * Register itself as the [Poll Mode] Driver of PCI devices. 
5702 */ 5703 RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd); 5704 RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map); 5705 RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci"); 5706 RTE_PMD_REGISTER_PARAM_STRING(net_ice, 5707 ICE_HW_DEBUG_MASK_ARG "=0xXXX" 5708 ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset>" 5709 ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>" 5710 ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>" 5711 ICE_RX_LOW_LATENCY_ARG "=<0|1>"); 5712 5713 RTE_LOG_REGISTER_SUFFIX(ice_logtype_init, init, NOTICE); 5714 RTE_LOG_REGISTER_SUFFIX(ice_logtype_driver, driver, NOTICE); 5715 #ifdef RTE_ETHDEV_DEBUG_RX 5716 RTE_LOG_REGISTER_SUFFIX(ice_logtype_rx, rx, DEBUG); 5717 #endif 5718 #ifdef RTE_ETHDEV_DEBUG_TX 5719 RTE_LOG_REGISTER_SUFFIX(ice_logtype_tx, tx, DEBUG); 5720 #endif 5721
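/*
 * Illustrative sketch (not part of the driver): how an application might
 * spread Rx traffic across its queues by programming the RSS redirection
 * table through the generic ethdev API, which ends up in
 * ice_rss_reta_update() above. The names "port_id" and "nb_rxq" are
 * assumptions used only for this example, and error checking is omitted.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[2];
 *	struct rte_eth_dev_info dev_info;
 *	uint16_t i, reta_size;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	reta_size = dev_info.reta_size;	// 128, 512 or 2048 on ice
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < reta_size && i < 128; i++) {
 *		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
 *		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;
 *
 *		reta_conf[idx].mask |= 1ULL << shift;
 *		reta_conf[idx].reta[shift] = i % nb_rxq;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, i);
 *
 * The two-entry reta_conf array covers 128 LUT entries (64 per group), so
 * the loop is capped at 128 for this sketch; a full-sized table would need
 * reta_size / RTE_ETH_RETA_GROUP_SIZE groups.
 */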