/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */

#include <stdint.h>
#include <string.h>

#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_alarm.h>

#include "igc_logs.h"
#include "igc_txrx.h"
#include "igc_filter.h"
#include "igc_flow.h"

#define IGC_INTEL_VENDOR_ID 0x8086

#define IGC_FC_PAUSE_TIME 0x0680
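/* Pause time is programmed in 802.3x pause quanta (one quantum = 512 bit
 * times), so 0x0680 corresponds to 1664 quanta.
 */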
#define IGC_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */
#define IGC_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

#define IGC_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define IGC_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
#define IGC_MSIX_OTHER_INTR_VEC 0 /* MSI-X other interrupt vector */
#define IGC_FLAG_NEED_LINK_UPDATE (1u << 0) /* need update link */

#define IGC_DEFAULT_RX_FREE_THRESH 32

#define IGC_DEFAULT_RX_PTHRESH 8
#define IGC_DEFAULT_RX_HTHRESH 8
#define IGC_DEFAULT_RX_WTHRESH 4

#define IGC_DEFAULT_TX_PTHRESH 8
#define IGC_DEFAULT_TX_HTHRESH 1
#define IGC_DEFAULT_TX_WTHRESH 16

/* External VLAN Enable bit mask */
#define IGC_CTRL_EXT_EXT_VLAN (1u << 26)

/* Speed select */
#define IGC_CTRL_SPEED_MASK (7u << 8)
#define IGC_CTRL_SPEED_2500 (6u << 8)

/* External VLAN Ether Type bit mask and shift */
#define IGC_VET_EXT 0xFFFF0000
#define IGC_VET_EXT_SHIFT 16

/* Force EEE Auto-negotiation */
#define IGC_EEER_EEE_FRC_AN (1u << 28)

/* Per Queue Good Packets Received Count */
#define IGC_PQGPRC(idx) (0x10010 + 0x100 * (idx))
/* Per Queue Good Octets Received Count */
#define IGC_PQGORC(idx) (0x10018 + 0x100 * (idx))
/* Per Queue Good Octets Transmitted Count */
#define IGC_PQGOTC(idx) (0x10034 + 0x100 * (idx))
/* Per Queue Multicast Packets Received Count */
#define IGC_PQMPRC(idx) (0x10038 + 0x100 * (idx))
/* Transmit Queue Drop Packet Count */
#define IGC_TQDPC(idx) (0xe030 + 0x40 * (idx))
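/* Each per-queue counter sits at a fixed stride from its base address,
 * e.g. IGC_PQGPRC(1) = 0x10010 + 0x100 = 0x10110 for Rx queue 1.
 */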

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define U32_0_IN_U64 0 /* lower bytes of u64 */
#define U32_1_IN_U64 1 /* higher bytes of u64 */
#else
#define U32_0_IN_U64 1
#define U32_1_IN_U64 0
#endif
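/* These indexes select the 32-bit halves of a uint64_t independently of host
 * byte order: ((uint32_t *)&v)[U32_0_IN_U64] is always the numerically lower
 * word, on little- and big-endian hosts alike (illustrative expression, not
 * a snippet from this file).
 */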

#define IGC_ALARM_INTERVAL 8000000u
/* unit: us. After about 13.6s some per-queue registers wrap around to 0. */
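/* Rationale: a 32-bit octet counter at the 2.5Gbps line rate wraps after
 * 2^32 * 8 / 2.5e9 ~= 13.7 seconds, so accumulating the hardware counters
 * into 64-bit software totals every 8 seconds guarantees no wrap is missed.
 */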

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = IGC_MAX_RXD,
	.nb_min = IGC_MIN_RXD,
	.nb_align = IGC_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = IGC_MAX_TXD,
	.nb_min = IGC_MIN_TXD,
	.nb_align = IGC_TXD_ALIGN,
	.nb_seg_max = IGC_TX_MAX_SEG,
	.nb_mtu_seg_max = IGC_TX_MAX_MTU_SEG,
};

static const struct rte_pci_id pci_id_igc_map[] = {
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_LM) },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V) },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_I) },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_K) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* store statistic names and their offsets in the stats structure */
struct rte_igc_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_igc_xstats_name_off rte_igc_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct igc_hw_stats, crcerrs)},
	{"rx_align_errors", offsetof(struct igc_hw_stats, algnerrc)},
	{"rx_errors", offsetof(struct igc_hw_stats, rxerrc)},
	{"rx_missed_packets", offsetof(struct igc_hw_stats, mpc)},
	{"tx_single_collision_packets", offsetof(struct igc_hw_stats, scc)},
	{"tx_multiple_collision_packets", offsetof(struct igc_hw_stats, mcc)},
	{"tx_excessive_collision_packets", offsetof(struct igc_hw_stats,
		ecol)},
	{"tx_late_collisions", offsetof(struct igc_hw_stats, latecol)},
	{"tx_total_collisions", offsetof(struct igc_hw_stats, colc)},
	{"tx_deferred_packets", offsetof(struct igc_hw_stats, dc)},
	{"tx_no_carrier_sense_packets", offsetof(struct igc_hw_stats, tncrs)},
	{"tx_discarded_packets", offsetof(struct igc_hw_stats, htdpmc)},
	{"rx_length_errors", offsetof(struct igc_hw_stats, rlec)},
	{"rx_xon_packets", offsetof(struct igc_hw_stats, xonrxc)},
	{"tx_xon_packets", offsetof(struct igc_hw_stats, xontxc)},
	{"rx_xoff_packets", offsetof(struct igc_hw_stats, xoffrxc)},
	{"tx_xoff_packets", offsetof(struct igc_hw_stats, xofftxc)},
	{"rx_flow_control_unsupported_packets", offsetof(struct igc_hw_stats,
		fcruc)},
	{"rx_size_64_packets", offsetof(struct igc_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct igc_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct igc_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct igc_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct igc_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct igc_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct igc_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct igc_hw_stats, mprc)},
	{"rx_undersize_errors", offsetof(struct igc_hw_stats, ruc)},
	{"rx_fragment_errors", offsetof(struct igc_hw_stats, rfc)},
	{"rx_oversize_errors", offsetof(struct igc_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct igc_hw_stats, rjc)},
	{"rx_no_buffers", offsetof(struct igc_hw_stats, rnbc)},
	{"rx_management_packets", offsetof(struct igc_hw_stats, mgprc)},
	{"rx_management_dropped", offsetof(struct igc_hw_stats, mgpdc)},
	{"tx_management_packets", offsetof(struct igc_hw_stats, mgptc)},
	{"rx_total_packets", offsetof(struct igc_hw_stats, tpr)},
	{"tx_total_packets", offsetof(struct igc_hw_stats, tpt)},
	{"rx_total_bytes", offsetof(struct igc_hw_stats, tor)},
	{"tx_total_bytes", offsetof(struct igc_hw_stats, tot)},
	{"tx_size_64_packets", offsetof(struct igc_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct igc_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct igc_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct igc_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct igc_hw_stats,
		ptc1023)},
	{"tx_size_1024_to_max_packets", offsetof(struct igc_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct igc_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct igc_hw_stats, bptc)},
	{"tx_tso_packets", offsetof(struct igc_hw_stats, tsctc)},
	{"rx_sent_to_host_packets", offsetof(struct igc_hw_stats, rpthc)},
	{"tx_sent_by_host_packets", offsetof(struct igc_hw_stats, hgptc)},
	{"interrupt_assert_count", offsetof(struct igc_hw_stats, iac)},
	{"rx_descriptor_lower_threshold",
		offsetof(struct igc_hw_stats, icrxdmtc)},
};

#define IGC_NB_XSTATS (sizeof(rte_igc_stats_strings) / \
		sizeof(rte_igc_stats_strings[0]))

static int eth_igc_configure(struct rte_eth_dev *dev);
static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static int eth_igc_stop(struct rte_eth_dev *dev);
static int eth_igc_start(struct rte_eth_dev *dev);
static int eth_igc_set_link_up(struct rte_eth_dev *dev);
static int eth_igc_set_link_down(struct rte_eth_dev *dev);
static int eth_igc_close(struct rte_eth_dev *dev);
static int eth_igc_reset(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev);
static int eth_igc_fw_version_get(struct rte_eth_dev *dev,
		char *fw_version, size_t fw_size);
static int eth_igc_infos_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static int eth_igc_led_on(struct rte_eth_dev *dev);
static int eth_igc_led_off(struct rte_eth_dev *dev);
static const uint32_t *eth_igc_supported_ptypes_get(struct rte_eth_dev *dev);
static int eth_igc_rar_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool);
static void eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static int eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *addr);
static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
		struct rte_ether_addr *mc_addr_set,
		uint32_t nb_mc_addr);
static int eth_igc_allmulticast_enable(struct rte_eth_dev *dev);
static int eth_igc_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int eth_igc_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *rte_stats);
static int eth_igc_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats, unsigned int n);
static int eth_igc_xstats_get_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids,
		uint64_t *values, unsigned int n);
static int eth_igc_xstats_get_names(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size);
static int eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids, struct rte_eth_xstat_name *xstats_names,
		unsigned int limit);
static int eth_igc_xstats_reset(struct rte_eth_dev *dev);
static int
eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
	uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx);
static int
eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
static int
eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
static int eth_igc_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int eth_igc_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int eth_igc_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);
static int eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);
static int
eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static int eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
		enum rte_vlan_type vlan_type, uint16_t tpid);

static const struct eth_dev_ops eth_igc_ops = {
	.dev_configure = eth_igc_configure,
	.link_update = eth_igc_link_update,
	.dev_stop = eth_igc_stop,
	.dev_start = eth_igc_start,
	.dev_close = eth_igc_close,
	.dev_reset = eth_igc_reset,
	.dev_set_link_up = eth_igc_set_link_up,
	.dev_set_link_down = eth_igc_set_link_down,
	.promiscuous_enable = eth_igc_promiscuous_enable,
	.promiscuous_disable = eth_igc_promiscuous_disable,
	.allmulticast_enable = eth_igc_allmulticast_enable,
	.allmulticast_disable = eth_igc_allmulticast_disable,
	.fw_version_get = eth_igc_fw_version_get,
	.dev_infos_get = eth_igc_infos_get,
	.dev_led_on = eth_igc_led_on,
	.dev_led_off = eth_igc_led_off,
	.dev_supported_ptypes_get = eth_igc_supported_ptypes_get,
	.mtu_set = eth_igc_mtu_set,
	.mac_addr_add = eth_igc_rar_set,
	.mac_addr_remove = eth_igc_rar_clear,
	.mac_addr_set = eth_igc_default_mac_addr_set,
	.set_mc_addr_list = eth_igc_set_mc_addr_list,

	.rx_queue_setup = eth_igc_rx_queue_setup,
	.rx_queue_release = eth_igc_rx_queue_release,
	.tx_queue_setup = eth_igc_tx_queue_setup,
	.tx_queue_release = eth_igc_tx_queue_release,
	.tx_done_cleanup = eth_igc_tx_done_cleanup,
	.rxq_info_get = eth_igc_rxq_info_get,
	.txq_info_get = eth_igc_txq_info_get,
	.stats_get = eth_igc_stats_get,
	.xstats_get = eth_igc_xstats_get,
	.xstats_get_by_id = eth_igc_xstats_get_by_id,
	.xstats_get_names_by_id = eth_igc_xstats_get_names_by_id,
	.xstats_get_names = eth_igc_xstats_get_names,
	.stats_reset = eth_igc_xstats_reset,
	.xstats_reset = eth_igc_xstats_reset,
	.queue_stats_mapping_set = eth_igc_queue_stats_mapping_set,
	.rx_queue_intr_enable = eth_igc_rx_queue_intr_enable,
	.rx_queue_intr_disable = eth_igc_rx_queue_intr_disable,
	.flow_ctrl_get = eth_igc_flow_ctrl_get,
	.flow_ctrl_set = eth_igc_flow_ctrl_set,
	.reta_update = eth_igc_rss_reta_update,
	.reta_query = eth_igc_rss_reta_query,
	.rss_hash_update = eth_igc_rss_hash_update,
	.rss_hash_conf_get = eth_igc_rss_hash_conf_get,
	.vlan_filter_set = eth_igc_vlan_filter_set,
	.vlan_offload_set = eth_igc_vlan_offload_set,
	.vlan_tpid_set = eth_igc_vlan_tpid_set,
	.vlan_strip_queue_set = eth_igc_vlan_strip_queue_set,
	.flow_ops_get = eth_igc_flow_ops_get,
};

/*
 * multiple queue mode checking
 */
static int
igc_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		PMD_INIT_LOG(ERR, "SRIOV is not supported.");
		return -EINVAL;
	}

	if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
			rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
		/* RSS together with VMDq not supported */
		PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
				rx_mq_mode);
		return -EINVAL;
	}

	/* To avoid breaking software that sets an invalid mode, only
	 * display a warning if an invalid mode is used.
	 */
	if (tx_mq_mode != RTE_ETH_MQ_TX_NONE)
		PMD_INIT_LOG(WARNING,
			"TX mode %d is not supported; it has no effect in this driver, so it is ignored",
			tx_mq_mode);

	return 0;
}

static int
eth_igc_configure(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	ret = igc_check_mq_mode(dev);
	if (ret != 0)
		return ret;

	intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
	return 0;
}

static int
eth_igc_set_link_up(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	if (hw->phy.media_type == igc_media_type_copper)
		igc_power_up_phy(hw);
	else
		igc_power_up_fiber_serdes_link(hw);
	return 0;
}

static int
eth_igc_set_link_down(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	if (hw->phy.media_type == igc_media_type_copper)
		igc_power_down_phy(hw);
	else
		igc_shutdown_fiber_serdes_link(hw);
	return 0;
}

/*
 * disable other interrupt
 */
static void
igc_intr_other_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc) {
		IGC_WRITE_REG(hw, IGC_EIMC, 1u << IGC_MSIX_OTHER_INTR_VEC);
	}

	IGC_WRITE_REG(hw, IGC_IMC, ~0);
	IGC_WRITE_FLUSH(hw);
}

/*
 * enable other interrupt
 */
static inline void
igc_intr_other_enable(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc) {
		IGC_WRITE_REG(hw, IGC_EIMS, 1u << IGC_MSIX_OTHER_INTR_VEC);
	}

	IGC_WRITE_REG(hw, IGC_IMS, intr->mask);
	IGC_WRITE_FLUSH(hw);
}

/*
 * Read ICR to get the interrupt causes; if a link status change is
 * indicated, set a flag so the link status gets updated.
 */
static void
eth_igc_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t icr;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);

	/* read-on-clear nic registers here */
	icr = IGC_READ_REG(hw, IGC_ICR);

	intr->flags = 0;
	if (icr & IGC_ICR_LSC)
		intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
}

/* return 0 means link status changed, -1 means not changed */
static int
eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_eth_link link;
	int link_check, count;

	link_check = 0;
	hw->mac.get_link_status = 1;

	/* possible wait-to-complete in up to 9 seconds */
	for (count = 0; count < IGC_LINK_UPDATE_CHECK_TIMEOUT; count++) {
		/* Read the real link status */
		switch (hw->phy.media_type) {
		case igc_media_type_copper:
			/* Do the work to read phy */
			igc_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			break;

		case igc_media_type_fiber:
			igc_check_for_link(hw);
			link_check = (IGC_READ_REG(hw, IGC_STATUS) &
					IGC_STATUS_LU);
			break;

		case igc_media_type_internal_serdes:
			igc_check_for_link(hw);
			link_check = hw->mac.serdes_has_link;
			break;

		default:
			break;
		}
		if (link_check || wait_to_complete == 0)
			break;
		rte_delay_ms(IGC_LINK_UPDATE_CHECK_INTERVAL);
	}
	memset(&link, 0, sizeof(link));

	/* Now we check if a transition has happened */
	if (link_check) {
		uint16_t duplex, speed;
		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		link.link_duplex = (duplex == FULL_DUPLEX) ?
				RTE_ETH_LINK_FULL_DUPLEX :
				RTE_ETH_LINK_HALF_DUPLEX;
		link.link_speed = speed;
		link.link_status = RTE_ETH_LINK_UP;
		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				RTE_ETH_LINK_SPEED_FIXED);

		if (speed == SPEED_2500) {
			uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
			if ((tipg & IGC_TIPG_IPGT_MASK) != 0x0b) {
				tipg &= ~IGC_TIPG_IPGT_MASK;
				tipg |= 0x0b;
				IGC_WRITE_REG(hw, IGC_TIPG, tipg);
			}
		}
	} else {
		link.link_speed = 0;
		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
		link.link_status = RTE_ETH_LINK_DOWN;
		link.link_autoneg = RTE_ETH_LINK_FIXED;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

/*
 * Execute link_update once an interrupt has signalled a possible change.
 */
static void
eth_igc_interrupt_action(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;
	int ret;

	if (intr->flags & IGC_FLAG_NEED_LINK_UPDATE) {
		intr->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;

		/* set get_link_status to check register later */
		ret = eth_igc_link_update(dev, 0);

		/* check if link has changed */
		if (ret < 0)
			return;

		rte_eth_linkstatus_get(dev, &link);
		if (link.link_status)
			PMD_DRV_LOG(INFO,
				" Port %d: Link Up - speed %u Mbps - %s",
				dev->data->port_id,
				(unsigned int)link.link_speed,
				link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
				"full-duplex" : "half-duplex");
		else
			PMD_DRV_LOG(INFO, " Port %d: Link Down",
				dev->data->port_id);

		PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
			pci_dev->addr.domain,
			pci_dev->addr.bus,
			pci_dev->addr.devid,
			pci_dev->addr.function);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	}
}

/*
 * Interrupt handler registered at initialization.
 *
 * @handle
 *  Pointer to interrupt handle.
 * @param
 *  The address of the (struct rte_eth_dev *) parameter registered earlier.
 */
static void
eth_igc_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	eth_igc_interrupt_get_status(dev);
	eth_igc_interrupt_action(dev);
}

static void igc_read_queue_stats_register(struct rte_eth_dev *dev);

/*
 * Update the queue statistics every IGC_ALARM_INTERVAL microseconds.
 * @param
 *  The address of the (struct rte_eth_dev *) parameter registered earlier.
 */
static void
igc_update_queue_stats_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	igc_read_queue_stats_register(dev);
	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
			igc_update_queue_stats_handler, dev);
}

/*
 * rx,tx enable/disable
 */
static void
eth_igc_rxtx_control(struct rte_eth_dev *dev, bool enable)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t tctl, rctl;

	tctl = IGC_READ_REG(hw, IGC_TCTL);
	rctl = IGC_READ_REG(hw, IGC_RCTL);

	if (enable) {
		/* enable Tx/Rx */
		tctl |= IGC_TCTL_EN;
		rctl |= IGC_RCTL_EN;
	} else {
		/* disable Tx/Rx */
		tctl &= ~IGC_TCTL_EN;
		rctl &= ~IGC_RCTL_EN;
	}
	IGC_WRITE_REG(hw, IGC_TCTL, tctl);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	IGC_WRITE_FLUSH(hw);
}

/*
 * This routine disables all traffic on the adapter by issuing a
 * global reset on the MAC.
 */
static int
eth_igc_stop(struct rte_eth_dev *dev)
{
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct rte_eth_link link;

	dev->data->dev_started = 0;
	adapter->stopped = 1;

	/* disable receive and transmit */
	eth_igc_rxtx_control(dev, false);

	/* disable all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
	IGC_WRITE_FLUSH(hw);

	/* clear all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EICR, 0x1f);

	igc_intr_other_disable(dev);

	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);

	/* disable intr eventfd mapping */
	rte_intr_disable(intr_handle);

	igc_reset_hw(hw);

	/* disable all wake up */
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/* disable checking EEE operation in MAC loopback mode */
	igc_read_reg_check_clear_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);

	/* Set bit for Go Link disconnect */
	igc_read_reg_check_set_bits(hw, IGC_82580_PHY_POWER_MGMT,
			IGC_82580_PM_GO_LINKD);

	/* Power down the phy. Needed to make the link go Down */
	eth_igc_set_link_down(dev);

	igc_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
				eth_igc_interrupt_handler,
				(void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	return 0;
}

/*
 * write interrupt vector allocation register
 * @hw
 *  board private structure
 * @queue_index
 *  queue index, valid 0,1,2,3
 * @tx
 *  tx:1, rx:0
 * @msix_vector
 *  msix-vector, valid 0,1,2,3,4
 */
static void
igc_write_ivar(struct igc_hw *hw, uint8_t queue_index,
		bool tx, uint8_t msix_vector)
{
	uint8_t offset = 0;
	uint8_t reg_index = queue_index >> 1;
	uint32_t val;

	/*
	 * IVAR(0)
	 * bit31...24	bit23...16	bit15...8	bit7...0
	 * TX1		RX1		TX0		RX0
	 *
	 * IVAR(1)
	 * bit31...24	bit23...16	bit15...8	bit7...0
	 * TX3		RX3		TX2		RX2
	 */
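	/*
	 * Example: mapping Rx queue 2 to MSI-X vector 3 gives reg_index = 1
	 * and offset = 0, so (3 | IGC_IVAR_VALID) lands in bits 7..0 of
	 * IVAR(1), the RX2 field in the layout above.
	 */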

	if (tx)
		offset = 8;

	if (queue_index & 1)
		offset += 16;

	val = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, reg_index);

	/* clear bits */
	val &= ~((uint32_t)0xFF << offset);

	/* write vector and valid bit */
	val |= (uint32_t)(msix_vector | IGC_IVAR_VALID) << offset;

	IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, reg_index, val);
}

/* Sets up the hardware to generate MSI-X interrupts properly
 * @hw
 *  board private structure
 */
static void
igc_configure_msix_intr(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	uint32_t intr_mask;
	uint32_t vec = IGC_MISC_VEC_ID;
	uint32_t base = IGC_MISC_VEC_ID;
	uint32_t misc_shift = 0;
	int i, nb_efd;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = IGC_RX_VEC_START;
		vec = base;
		misc_shift = 1;
	}

	/* turn on MSI-X capability first */
	IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE |
				IGC_GPIE_PBA | IGC_GPIE_EIAME |
				IGC_GPIE_NSICR);

	nb_efd = rte_intr_nb_efd_get(intr_handle);
	if (nb_efd < 0)
		return;

	intr_mask = RTE_LEN2MASK(nb_efd, uint32_t) << misc_shift;
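	/*
	 * e.g. with 4 Rx event fds and the misc vector at slot 0
	 * (misc_shift = 1): RTE_LEN2MASK(4, uint32_t) = 0xf, so
	 * intr_mask = 0x1e selects EICR bits 1..4 for the queues.
	 */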

	if (dev->data->dev_conf.intr_conf.lsc)
		intr_mask |= (1u << IGC_MSIX_OTHER_INTR_VEC);

	/* enable msix auto-clear */
	igc_read_reg_check_set_bits(hw, IGC_EIAC, intr_mask);

	/* set other cause interrupt vector */
	igc_read_reg_check_set_bits(hw, IGC_IVAR_MISC,
		(uint32_t)(IGC_MSIX_OTHER_INTR_VEC | IGC_IVAR_VALID) << 8);

	/* enable auto-mask */
	igc_read_reg_check_set_bits(hw, IGC_EIAM, intr_mask);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		igc_write_ivar(hw, i, 0, vec);
		rte_intr_vec_list_index_set(intr_handle, i, vec);
		if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
			vec++;
	}

	IGC_WRITE_FLUSH(hw);
}

/**
 * Enable or disable the link status change interrupt in the software
 * interrupt mask; the mask is written to hardware when interrupts are
 * (re-)enabled.
 *
 * @dev
 *  Pointer to struct rte_eth_dev.
 * @on
 *  Enable or Disable
 */
static void
igc_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);

	if (on)
		intr->mask |= IGC_ICR_LSC;
	else
		intr->mask &= ~IGC_ICR_LSC;
}

/*
 * Enable the Rx queue interrupts.
 * It is called only once, during NIC initialization.
 */
static void
igc_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	uint32_t mask;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0;
	int nb_efd;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	nb_efd = rte_intr_nb_efd_get(intr_handle);
	if (nb_efd < 0)
		return;

	mask = RTE_LEN2MASK(nb_efd, uint32_t) << misc_shift;
	IGC_WRITE_REG(hw, IGC_EIMS, mask);
}

/*
 * Get hardware rx-buffer size.
 */
static inline int
igc_get_rx_buffer_size(struct igc_hw *hw)
{
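	/*
	 * RXPBS[5:0] holds the Rx packet buffer size in KB; the shift by 10
	 * converts it to bytes (e.g. a field value of 32 yields 32768).
	 */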
	return (IGC_READ_REG(hw, IGC_RXPBS) & 0x3f) << 10;
}

/*
 * igc_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded.
 */
static void
igc_hw_control_acquire(struct igc_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

/*
 * igc_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igc_hw_control_release(struct igc_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(hw, IGC_CTRL_EXT,
			ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

static int
igc_hardware_init(struct igc_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Let the firmware know the OS is in control */
	igc_hw_control_acquire(hw);

	/* Issue a global reset */
	igc_reset_hw(hw);

	/* disable all wake up */
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/*
	 * Hardware flow control
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 */
	rx_buf_size = igc_get_rx_buffer_size(hw);
	hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
	hw->fc.low_water = hw->fc.high_water - 1500;
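	/*
	 * Worked example (assuming a 32 KB Rx buffer): high_water =
	 * 32768 - 2 * 1518 = 29732 bytes, low_water = 28232 bytes.
	 */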
	hw->fc.pause_time = IGC_FC_PAUSE_TIME;
	hw->fc.send_xon = 1;
	hw->fc.requested_mode = igc_fc_full;

	diag = igc_init_hw(hw);
	if (diag < 0)
		return diag;

	igc_get_phy_info(hw);
	igc_check_for_link(hw);

	return 0;
}

static int
eth_igc_start(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t *speeds;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* disable all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
	IGC_WRITE_FLUSH(hw);

	/* clear all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EICR, 0x1f);

	/* disable uio/vfio intr/eventfd mapping */
	if (!adapter->stopped)
		rte_intr_disable(intr_handle);

	/* Power up the phy. Needed to make the link go Up */
	eth_igc_set_link_up(dev);

	/* Put the address into the Receive Address Array */
	igc_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igc_hardware_init(hw)) {
		PMD_DRV_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}
	adapter->stopped = 0;

	/* check and configure queue intr-vector mapping */
	if (rte_intr_cap_multiple(intr_handle) &&
			dev->data->dev_conf.intr_conf.rxq) {
		uint32_t intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
				dev->data->nb_rx_queues)) {
			PMD_DRV_LOG(ERR,
				"Failed to allocate %d rx_queues intr_vec",
				dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure msix for rx interrupt */
	igc_configure_msix_intr(dev);

	igc_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = igc_rx_init(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to initialize RX hardware");
		igc_dev_clear_queues(dev);
		return ret;
	}

9764f09bc55SAlvin Zhang igc_clear_hw_cntrs_base_generic(hw);
9774f09bc55SAlvin Zhang
9785f266d0dSAlvin Zhang /* VLAN Offload Settings */
9795f266d0dSAlvin Zhang eth_igc_vlan_offload_set(dev,
980295968d1SFerruh Yigit RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
981295968d1SFerruh Yigit RTE_ETH_VLAN_EXTEND_MASK);
9825f266d0dSAlvin Zhang
9834f09bc55SAlvin Zhang /* Setup link speed and duplex */
9844f09bc55SAlvin Zhang speeds = &dev->data->dev_conf.link_speeds;
985295968d1SFerruh Yigit if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
9864f09bc55SAlvin Zhang hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
9874f09bc55SAlvin Zhang hw->mac.autoneg = 1;
9884f09bc55SAlvin Zhang } else {
989a5aeb2b9SAlvin Zhang int num_speeds = 0;
9904f09bc55SAlvin Zhang
991295968d1SFerruh Yigit if (*speeds & RTE_ETH_LINK_SPEED_FIXED) {
992a4d5f9f9SAlvin Zhang PMD_DRV_LOG(ERR,
993a4d5f9f9SAlvin Zhang "Force speed mode currently not supported");
994a4d5f9f9SAlvin Zhang igc_dev_clear_queues(dev);
995a4d5f9f9SAlvin Zhang return -EINVAL;
996a4d5f9f9SAlvin Zhang }
997a4d5f9f9SAlvin Zhang
9984f09bc55SAlvin Zhang hw->phy.autoneg_advertised = 0;
999a4d5f9f9SAlvin Zhang hw->mac.autoneg = 1;
10004f09bc55SAlvin Zhang
1001295968d1SFerruh Yigit if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
1002295968d1SFerruh Yigit RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
1003295968d1SFerruh Yigit RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G)) {
10044f09bc55SAlvin Zhang num_speeds = -1;
10054f09bc55SAlvin Zhang goto error_invalid_config;
10064f09bc55SAlvin Zhang }
1007295968d1SFerruh Yigit if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
10084f09bc55SAlvin Zhang hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
10094f09bc55SAlvin Zhang num_speeds++;
10104f09bc55SAlvin Zhang }
1011295968d1SFerruh Yigit if (*speeds & RTE_ETH_LINK_SPEED_10M) {
10124f09bc55SAlvin Zhang hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
10134f09bc55SAlvin Zhang num_speeds++;
10144f09bc55SAlvin Zhang }
1015295968d1SFerruh Yigit if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
10164f09bc55SAlvin Zhang hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
10174f09bc55SAlvin Zhang num_speeds++;
10184f09bc55SAlvin Zhang }
1019295968d1SFerruh Yigit if (*speeds & RTE_ETH_LINK_SPEED_100M) {
10204f09bc55SAlvin Zhang hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
10214f09bc55SAlvin Zhang num_speeds++;
10224f09bc55SAlvin Zhang }
1023295968d1SFerruh Yigit if (*speeds & RTE_ETH_LINK_SPEED_1G) {
10244f09bc55SAlvin Zhang hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
10254f09bc55SAlvin Zhang num_speeds++;
10264f09bc55SAlvin Zhang }
1027295968d1SFerruh Yigit if (*speeds & RTE_ETH_LINK_SPEED_2_5G) {
10284f09bc55SAlvin Zhang hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
10294f09bc55SAlvin Zhang num_speeds++;
10304f09bc55SAlvin Zhang }
1031a4d5f9f9SAlvin Zhang if (num_speeds == 0)
10324f09bc55SAlvin Zhang goto error_invalid_config;
10334f09bc55SAlvin Zhang }
10344f09bc55SAlvin Zhang
10354f09bc55SAlvin Zhang igc_setup_link(hw);
10364f09bc55SAlvin Zhang
10374f09bc55SAlvin Zhang if (rte_intr_allow_others(intr_handle)) {
10384f09bc55SAlvin Zhang /* check if lsc interrupt is enabled */
10394f09bc55SAlvin Zhang if (dev->data->dev_conf.intr_conf.lsc)
10404f09bc55SAlvin Zhang igc_lsc_interrupt_setup(dev, 1);
10414f09bc55SAlvin Zhang else
10424f09bc55SAlvin Zhang igc_lsc_interrupt_setup(dev, 0);
10434f09bc55SAlvin Zhang } else {
10444f09bc55SAlvin Zhang rte_intr_callback_unregister(intr_handle,
10454f09bc55SAlvin Zhang eth_igc_interrupt_handler,
10464f09bc55SAlvin Zhang (void *)dev);
10474f09bc55SAlvin Zhang if (dev->data->dev_conf.intr_conf.lsc)
10484f09bc55SAlvin Zhang PMD_DRV_LOG(INFO,
10494f09bc55SAlvin Zhang "LSC won't enable because of no intr multiplex");
10504f09bc55SAlvin Zhang }
10514f09bc55SAlvin Zhang
10524f09bc55SAlvin Zhang /* enable uio/vfio intr/eventfd mapping */
10534f09bc55SAlvin Zhang rte_intr_enable(intr_handle);
10544f09bc55SAlvin Zhang
1055e6defdfdSAlvin Zhang rte_eal_alarm_set(IGC_ALARM_INTERVAL,
1056e6defdfdSAlvin Zhang igc_update_queue_stats_handler, dev);
1057e6defdfdSAlvin Zhang
10589417098fSAlvin Zhang /* check if rxq interrupt is enabled */
10599417098fSAlvin Zhang if (dev->data->dev_conf.intr_conf.rxq &&
10609417098fSAlvin Zhang rte_intr_dp_is_en(intr_handle))
10619417098fSAlvin Zhang igc_rxq_interrupt_setup(dev);
10629417098fSAlvin Zhang
10634f09bc55SAlvin Zhang /* resume enabled intr since hw reset */
10644f09bc55SAlvin Zhang igc_intr_other_enable(dev);
10654f09bc55SAlvin Zhang
1066a5aeb2b9SAlvin Zhang eth_igc_rxtx_control(dev, true);
10674f09bc55SAlvin Zhang eth_igc_link_update(dev, 0);
10684f09bc55SAlvin Zhang
10698938c4e1SAlvin Zhang /* configure MAC-loopback mode */
10708938c4e1SAlvin Zhang if (dev->data->dev_conf.lpbk_mode == 1) {
10718938c4e1SAlvin Zhang uint32_t reg_val;
10728938c4e1SAlvin Zhang
10738938c4e1SAlvin Zhang reg_val = IGC_READ_REG(hw, IGC_CTRL);
10748938c4e1SAlvin Zhang reg_val &= ~IGC_CTRL_SPEED_MASK;
10758938c4e1SAlvin Zhang reg_val |= IGC_CTRL_SLU | IGC_CTRL_FRCSPD |
10768938c4e1SAlvin Zhang IGC_CTRL_FRCDPX | IGC_CTRL_FD | IGC_CTRL_SPEED_2500;
10778938c4e1SAlvin Zhang IGC_WRITE_REG(hw, IGC_CTRL, reg_val);
10788938c4e1SAlvin Zhang
10798938c4e1SAlvin Zhang igc_read_reg_check_set_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);
10808938c4e1SAlvin Zhang }
10818938c4e1SAlvin Zhang
108266fde1b9SAlvin Zhang return 0;
10834f09bc55SAlvin Zhang
10844f09bc55SAlvin Zhang error_invalid_config:
10854f09bc55SAlvin Zhang PMD_DRV_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
10864f09bc55SAlvin Zhang dev->data->dev_conf.link_speeds, dev->data->port_id);
1087a5aeb2b9SAlvin Zhang igc_dev_clear_queues(dev);
10884f09bc55SAlvin Zhang return -EINVAL;
108966fde1b9SAlvin Zhang }
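/*
 * Illustrative usage (not part of the driver): the speed bits validated
 * in eth_igc_dev_start() come from rte_eth_conf.link_speeds. A minimal
 * sketch, assuming a hypothetical port 0 with one Rx and one Tx queue:
 *
 *	struct rte_eth_conf conf = { 0 };
 *	conf.link_speeds = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G;
 *	int ret = rte_eth_dev_configure(0, 1, 1, &conf);
 *
 * Any bit outside the six speeds handled above takes the
 * error_invalid_config path and start fails with -EINVAL.
 */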
109066fde1b9SAlvin Zhang
10918cb7c57dSAlvin Zhang static int
10928cb7c57dSAlvin Zhang igc_reset_swfw_lock(struct igc_hw *hw)
10938cb7c57dSAlvin Zhang {
10948cb7c57dSAlvin Zhang int ret_val;
10958cb7c57dSAlvin Zhang
10968cb7c57dSAlvin Zhang /*
10978cb7c57dSAlvin Zhang * Do mac ops initialization manually here, since we will need
10988cb7c57dSAlvin Zhang * some function pointers set by this call.
10998cb7c57dSAlvin Zhang */
11008cb7c57dSAlvin Zhang ret_val = igc_init_mac_params(hw);
11018cb7c57dSAlvin Zhang if (ret_val)
11028cb7c57dSAlvin Zhang return ret_val;
11038cb7c57dSAlvin Zhang
11048cb7c57dSAlvin Zhang /*
11058cb7c57dSAlvin Zhang * SMBI lock should not fail in this early stage. If this is the case,
11068cb7c57dSAlvin Zhang * it is due to an improper exit of the application.
11078cb7c57dSAlvin Zhang * So force the release of the faulty lock.
11088cb7c57dSAlvin Zhang */
11098cb7c57dSAlvin Zhang if (igc_get_hw_semaphore_generic(hw) < 0)
11108cb7c57dSAlvin Zhang PMD_DRV_LOG(DEBUG, "SMBI lock released");
11118cb7c57dSAlvin Zhang
11128cb7c57dSAlvin Zhang igc_put_hw_semaphore_generic(hw);
11138cb7c57dSAlvin Zhang
11148cb7c57dSAlvin Zhang if (hw->mac.ops.acquire_swfw_sync != NULL) {
11158cb7c57dSAlvin Zhang uint16_t mask;
11168cb7c57dSAlvin Zhang
11178cb7c57dSAlvin Zhang /*
11188cb7c57dSAlvin Zhang * Phy lock should not fail in this early stage.
11198cb7c57dSAlvin Zhang * If this is the case, it is due to an improper exit of the
11208cb7c57dSAlvin Zhang * application. So force the release of the faulty lock.
11218cb7c57dSAlvin Zhang */
11228cb7c57dSAlvin Zhang mask = IGC_SWFW_PHY0_SM;
11238cb7c57dSAlvin Zhang if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
11248cb7c57dSAlvin Zhang PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
11258cb7c57dSAlvin Zhang hw->bus.func);
11268cb7c57dSAlvin Zhang }
11278cb7c57dSAlvin Zhang hw->mac.ops.release_swfw_sync(hw, mask);
11288cb7c57dSAlvin Zhang
11298cb7c57dSAlvin Zhang /*
11308cb7c57dSAlvin Zhang * This one is trickier since it is common to all ports; but the
11318cb7c57dSAlvin Zhang * swfw_sync retries last long enough (1s) to be almost sure that
11328cb7c57dSAlvin Zhang * if the lock cannot be taken, it is due to an improper hold of
11338cb7c57dSAlvin Zhang * the semaphore.
11348cb7c57dSAlvin Zhang */
11358cb7c57dSAlvin Zhang mask = IGC_SWFW_EEP_SM;
11368cb7c57dSAlvin Zhang if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0)
11378cb7c57dSAlvin Zhang PMD_DRV_LOG(DEBUG, "SWFW common locks released");
11388cb7c57dSAlvin Zhang
11398cb7c57dSAlvin Zhang hw->mac.ops.release_swfw_sync(hw, mask);
11408cb7c57dSAlvin Zhang }
11418cb7c57dSAlvin Zhang
11428cb7c57dSAlvin Zhang return IGC_SUCCESS;
11438cb7c57dSAlvin Zhang }
11448cb7c57dSAlvin Zhang
1145a5aeb2b9SAlvin Zhang /*
1146a5aeb2b9SAlvin Zhang * free all rx/tx queues.
1147a5aeb2b9SAlvin Zhang */
1148a5aeb2b9SAlvin Zhang static void
1149a5aeb2b9SAlvin Zhang igc_dev_free_queues(struct rte_eth_dev *dev)
1150a5aeb2b9SAlvin Zhang {
1151a5aeb2b9SAlvin Zhang uint16_t i;
1152a5aeb2b9SAlvin Zhang
1153a5aeb2b9SAlvin Zhang for (i = 0; i < dev->data->nb_rx_queues; i++) {
11547483341aSXueming Li eth_igc_rx_queue_release(dev, i);
1155a5aeb2b9SAlvin Zhang dev->data->rx_queues[i] = NULL;
1156a5aeb2b9SAlvin Zhang }
1157a5aeb2b9SAlvin Zhang dev->data->nb_rx_queues = 0;
1158a5aeb2b9SAlvin Zhang
1159a5aeb2b9SAlvin Zhang for (i = 0; i < dev->data->nb_tx_queues; i++) {
11607483341aSXueming Li eth_igc_tx_queue_release(dev, i);
1161a5aeb2b9SAlvin Zhang dev->data->tx_queues[i] = NULL;
1162a5aeb2b9SAlvin Zhang }
1163a5aeb2b9SAlvin Zhang dev->data->nb_tx_queues = 0;
1164a5aeb2b9SAlvin Zhang }
1165a5aeb2b9SAlvin Zhang
1166b142387bSThomas Monjalon static int
116766fde1b9SAlvin Zhang eth_igc_close(struct rte_eth_dev *dev)
116866fde1b9SAlvin Zhang {
11694f09bc55SAlvin Zhang struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1170d61138d4SHarman Kalra struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
11718cb7c57dSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
11724f09bc55SAlvin Zhang struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
11734f09bc55SAlvin Zhang int retry = 0;
117462024eb8SIvan Ilchenko int ret = 0;
11758cb7c57dSAlvin Zhang
117666fde1b9SAlvin Zhang PMD_INIT_FUNC_TRACE();
117730410493SThomas Monjalon if (rte_eal_process_type() != RTE_PROC_PRIMARY)
117830410493SThomas Monjalon return 0;
11798cb7c57dSAlvin Zhang
11804f09bc55SAlvin Zhang if (!adapter->stopped)
118162024eb8SIvan Ilchenko ret = eth_igc_stop(dev);
11824f09bc55SAlvin Zhang
1183746664d5SAlvin Zhang igc_flow_flush(dev, NULL);
1184746664d5SAlvin Zhang igc_clear_all_filter(dev);
1185746664d5SAlvin Zhang
11864f09bc55SAlvin Zhang igc_intr_other_disable(dev);
11874f09bc55SAlvin Zhang do {
11884f09bc55SAlvin Zhang int ret = rte_intr_callback_unregister(intr_handle,
11894f09bc55SAlvin Zhang eth_igc_interrupt_handler, dev);
11904f09bc55SAlvin Zhang if (ret >= 0 || ret == -ENOENT || ret == -EINVAL)
11914f09bc55SAlvin Zhang break;
11924f09bc55SAlvin Zhang
11934f09bc55SAlvin Zhang PMD_DRV_LOG(ERR, "intr callback unregister failed: %d", ret);
11944f09bc55SAlvin Zhang DELAY(200 * 1000); /* delay 200ms */
11954f09bc55SAlvin Zhang } while (retry++ < 5);
11964f09bc55SAlvin Zhang
11978cb7c57dSAlvin Zhang igc_phy_hw_reset(hw);
11988cb7c57dSAlvin Zhang igc_hw_control_release(hw);
1199a5aeb2b9SAlvin Zhang igc_dev_free_queues(dev);
12008cb7c57dSAlvin Zhang
12018cb7c57dSAlvin Zhang /* Reset any pending lock */
12028cb7c57dSAlvin Zhang igc_reset_swfw_lock(hw);
1203b142387bSThomas Monjalon
120462024eb8SIvan Ilchenko return ret;
12058cb7c57dSAlvin Zhang }
12068cb7c57dSAlvin Zhang
12078cb7c57dSAlvin Zhang static void
12088cb7c57dSAlvin Zhang igc_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
12098cb7c57dSAlvin Zhang {
12108cb7c57dSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
12118cb7c57dSAlvin Zhang
12128cb7c57dSAlvin Zhang hw->vendor_id = pci_dev->id.vendor_id;
12138cb7c57dSAlvin Zhang hw->device_id = pci_dev->id.device_id;
12148cb7c57dSAlvin Zhang hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
12158cb7c57dSAlvin Zhang hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
121666fde1b9SAlvin Zhang }
121766fde1b9SAlvin Zhang
121866fde1b9SAlvin Zhang static int
121966fde1b9SAlvin Zhang eth_igc_dev_init(struct rte_eth_dev *dev)
122066fde1b9SAlvin Zhang {
122166fde1b9SAlvin Zhang struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
12224f09bc55SAlvin Zhang struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
12238cb7c57dSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1224e6defdfdSAlvin Zhang int i, error = 0;
122566fde1b9SAlvin Zhang
122666fde1b9SAlvin Zhang PMD_INIT_FUNC_TRACE();
122766fde1b9SAlvin Zhang dev->dev_ops = ð_igc_ops;
1228cbfc6111SFerruh Yigit dev->rx_queue_count = eth_igc_rx_queue_count;
1229cbfc6111SFerruh Yigit dev->rx_descriptor_status = eth_igc_rx_descriptor_status;
1230cbfc6111SFerruh Yigit dev->tx_descriptor_status = eth_igc_tx_descriptor_status;
123166fde1b9SAlvin Zhang
123266fde1b9SAlvin Zhang /*
123366fde1b9SAlvin Zhang * For secondary processes, we don't initialize any further, as the
123466fde1b9SAlvin Zhang * primary has already done this work. Only check that we don't need
123566fde1b9SAlvin Zhang * a different RX function.
123666fde1b9SAlvin Zhang */
123766fde1b9SAlvin Zhang if (rte_eal_process_type() != RTE_PROC_PRIMARY)
123866fde1b9SAlvin Zhang return 0;
123966fde1b9SAlvin Zhang
12408cb7c57dSAlvin Zhang rte_eth_copy_pci_info(dev, pci_dev);
1241f30e69b4SFerruh Yigit dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
12428cb7c57dSAlvin Zhang
12438cb7c57dSAlvin Zhang hw->back = pci_dev;
12448cb7c57dSAlvin Zhang hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
12458cb7c57dSAlvin Zhang
12468cb7c57dSAlvin Zhang igc_identify_hardware(dev, pci_dev);
12478cb7c57dSAlvin Zhang if (igc_setup_init_funcs(hw, false) != IGC_SUCCESS) {
12488cb7c57dSAlvin Zhang error = -EIO;
12498cb7c57dSAlvin Zhang goto err_late;
12508cb7c57dSAlvin Zhang }
12518cb7c57dSAlvin Zhang
12528cb7c57dSAlvin Zhang igc_get_bus_info(hw);
12538cb7c57dSAlvin Zhang
12548cb7c57dSAlvin Zhang /* Reset any pending lock */
12558cb7c57dSAlvin Zhang if (igc_reset_swfw_lock(hw) != IGC_SUCCESS) {
12568cb7c57dSAlvin Zhang error = -EIO;
12578cb7c57dSAlvin Zhang goto err_late;
12588cb7c57dSAlvin Zhang }
12598cb7c57dSAlvin Zhang
12608cb7c57dSAlvin Zhang /* Finish initialization */
12618cb7c57dSAlvin Zhang if (igc_setup_init_funcs(hw, true) != IGC_SUCCESS) {
12628cb7c57dSAlvin Zhang error = -EIO;
12638cb7c57dSAlvin Zhang goto err_late;
12648cb7c57dSAlvin Zhang }
12658cb7c57dSAlvin Zhang
12668cb7c57dSAlvin Zhang hw->mac.autoneg = 1;
12678cb7c57dSAlvin Zhang hw->phy.autoneg_wait_to_complete = 0;
12688cb7c57dSAlvin Zhang hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
12698cb7c57dSAlvin Zhang
12708cb7c57dSAlvin Zhang /* Copper options */
12718cb7c57dSAlvin Zhang if (hw->phy.media_type == igc_media_type_copper) {
12728cb7c57dSAlvin Zhang hw->phy.mdix = 0; /* AUTO_ALL_MODES */
12738cb7c57dSAlvin Zhang hw->phy.disable_polarity_correction = 0;
12748cb7c57dSAlvin Zhang hw->phy.ms_type = igc_ms_hw_default;
12758cb7c57dSAlvin Zhang }
12768cb7c57dSAlvin Zhang
12778cb7c57dSAlvin Zhang /*
12788cb7c57dSAlvin Zhang * Start from a known state; this is important for reading the NVM
12798cb7c57dSAlvin Zhang * and MAC address.
12808cb7c57dSAlvin Zhang */
12818cb7c57dSAlvin Zhang igc_reset_hw(hw);
12828cb7c57dSAlvin Zhang
12838cb7c57dSAlvin Zhang /* Make sure we have a good EEPROM before we read from it */
12848cb7c57dSAlvin Zhang if (igc_validate_nvm_checksum(hw) < 0) {
12858cb7c57dSAlvin Zhang /*
12868cb7c57dSAlvin Zhang * Some PCI-E parts fail the first check due to the link being in
12878cb7c57dSAlvin Zhang * a sleep state; call it again, and if it fails a second time it
12888cb7c57dSAlvin Zhang * is a real issue.
12898cb7c57dSAlvin Zhang */
12908cb7c57dSAlvin Zhang if (igc_validate_nvm_checksum(hw) < 0) {
12918cb7c57dSAlvin Zhang PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
12928cb7c57dSAlvin Zhang error = -EIO;
12938cb7c57dSAlvin Zhang goto err_late;
12948cb7c57dSAlvin Zhang }
12958cb7c57dSAlvin Zhang }
12968cb7c57dSAlvin Zhang
12978cb7c57dSAlvin Zhang /* Read the permanent MAC address out of the EEPROM */
12988cb7c57dSAlvin Zhang if (igc_read_mac_addr(hw) != 0) {
12998cb7c57dSAlvin Zhang PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
13008cb7c57dSAlvin Zhang error = -EIO;
13018cb7c57dSAlvin Zhang goto err_late;
13028cb7c57dSAlvin Zhang }
13038cb7c57dSAlvin Zhang
13048cb7c57dSAlvin Zhang /* Allocate memory for storing MAC addresses */
130566fde1b9SAlvin Zhang dev->data->mac_addrs = rte_zmalloc("igc",
13068cb7c57dSAlvin Zhang RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
130766fde1b9SAlvin Zhang if (dev->data->mac_addrs == NULL) {
130866fde1b9SAlvin Zhang PMD_INIT_LOG(ERR, "Failed to allocate %d bytes for storing MAC",
13098cb7c57dSAlvin Zhang RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
13108cb7c57dSAlvin Zhang error = -ENOMEM;
13118cb7c57dSAlvin Zhang goto err_late;
13128cb7c57dSAlvin Zhang }
13138cb7c57dSAlvin Zhang
13148cb7c57dSAlvin Zhang /* Copy the permanent MAC address */
13158cb7c57dSAlvin Zhang rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
13168cb7c57dSAlvin Zhang &dev->data->mac_addrs[0]);
13178cb7c57dSAlvin Zhang
13188cb7c57dSAlvin Zhang /* Now initialize the hardware */
13198cb7c57dSAlvin Zhang if (igc_hardware_init(hw) != 0) {
13208cb7c57dSAlvin Zhang PMD_INIT_LOG(ERR, "Hardware initialization failed");
13218cb7c57dSAlvin Zhang rte_free(dev->data->mac_addrs);
13228cb7c57dSAlvin Zhang dev->data->mac_addrs = NULL;
13238cb7c57dSAlvin Zhang error = -ENODEV;
13248cb7c57dSAlvin Zhang goto err_late;
132566fde1b9SAlvin Zhang }
132666fde1b9SAlvin Zhang
13278cb7c57dSAlvin Zhang hw->mac.get_link_status = 1;
13284f09bc55SAlvin Zhang igc->stopped = 0;
13298cb7c57dSAlvin Zhang
13308cb7c57dSAlvin Zhang /* Indicate SOL/IDER usage */
13318cb7c57dSAlvin Zhang if (igc_check_reset_block(hw) < 0)
13328cb7c57dSAlvin Zhang PMD_INIT_LOG(ERR,
13338cb7c57dSAlvin Zhang "PHY reset is blocked due to SOL/IDER session.");
13348cb7c57dSAlvin Zhang
133566fde1b9SAlvin Zhang PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
133666fde1b9SAlvin Zhang dev->data->port_id, pci_dev->id.vendor_id,
133766fde1b9SAlvin Zhang pci_dev->id.device_id);
133866fde1b9SAlvin Zhang
1339d61138d4SHarman Kalra rte_intr_callback_register(pci_dev->intr_handle,
13404f09bc55SAlvin Zhang eth_igc_interrupt_handler, (void *)dev);
13414f09bc55SAlvin Zhang
13424f09bc55SAlvin Zhang /* enable uio/vfio intr/eventfd mapping */
1343d61138d4SHarman Kalra rte_intr_enable(pci_dev->intr_handle);
13444f09bc55SAlvin Zhang
13454f09bc55SAlvin Zhang /* enable support intr */
13464f09bc55SAlvin Zhang igc_intr_other_enable(dev);
13474f09bc55SAlvin Zhang
1348e6defdfdSAlvin Zhang /* initialize queue stats mapping */
1349e6defdfdSAlvin Zhang for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
1350e6defdfdSAlvin Zhang igc->txq_stats_map[i] = -1;
1351e6defdfdSAlvin Zhang igc->rxq_stats_map[i] = -1;
1352e6defdfdSAlvin Zhang }
13539417098fSAlvin Zhang
1354746664d5SAlvin Zhang igc_flow_init(dev);
1355746664d5SAlvin Zhang igc_clear_all_filter(dev);
135666fde1b9SAlvin Zhang return 0;
13578cb7c57dSAlvin Zhang
13588cb7c57dSAlvin Zhang err_late:
13598cb7c57dSAlvin Zhang igc_hw_control_release(hw);
13608cb7c57dSAlvin Zhang return error;
136166fde1b9SAlvin Zhang }
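/*
 * Illustrative usage (not part of the driver): once probe completes, an
 * application can read back the permanent MAC address copied above, e.g.
 * for a hypothetical port id 0:
 *
 *	struct rte_ether_addr mac;
 *	rte_eth_macaddr_get(0, &mac);
 */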
136266fde1b9SAlvin Zhang
136366fde1b9SAlvin Zhang static int
136466fde1b9SAlvin Zhang eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)
136566fde1b9SAlvin Zhang {
136666fde1b9SAlvin Zhang PMD_INIT_FUNC_TRACE();
136766fde1b9SAlvin Zhang eth_igc_close(eth_dev);
136866fde1b9SAlvin Zhang return 0;
136966fde1b9SAlvin Zhang }
137066fde1b9SAlvin Zhang
137166fde1b9SAlvin Zhang static int
137266fde1b9SAlvin Zhang eth_igc_reset(struct rte_eth_dev *dev)
137366fde1b9SAlvin Zhang {
137466fde1b9SAlvin Zhang int ret;
137566fde1b9SAlvin Zhang
137666fde1b9SAlvin Zhang PMD_INIT_FUNC_TRACE();
137766fde1b9SAlvin Zhang
137866fde1b9SAlvin Zhang ret = eth_igc_dev_uninit(dev);
137966fde1b9SAlvin Zhang if (ret)
138066fde1b9SAlvin Zhang return ret;
138166fde1b9SAlvin Zhang
138266fde1b9SAlvin Zhang return eth_igc_dev_init(dev);
138366fde1b9SAlvin Zhang }
138466fde1b9SAlvin Zhang
138566fde1b9SAlvin Zhang static int
138666fde1b9SAlvin Zhang eth_igc_promiscuous_enable(struct rte_eth_dev *dev)
138766fde1b9SAlvin Zhang {
1388a5aeb2b9SAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1389a5aeb2b9SAlvin Zhang uint32_t rctl;
1390a5aeb2b9SAlvin Zhang
1391a5aeb2b9SAlvin Zhang rctl = IGC_READ_REG(hw, IGC_RCTL);
1392a5aeb2b9SAlvin Zhang rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
1393a5aeb2b9SAlvin Zhang IGC_WRITE_REG(hw, IGC_RCTL, rctl);
139466fde1b9SAlvin Zhang return 0;
139566fde1b9SAlvin Zhang }
139666fde1b9SAlvin Zhang
139766fde1b9SAlvin Zhang static int
139866fde1b9SAlvin Zhang eth_igc_promiscuous_disable(struct rte_eth_dev *dev)
139966fde1b9SAlvin Zhang {
1400a5aeb2b9SAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1401a5aeb2b9SAlvin Zhang uint32_t rctl;
1402a5aeb2b9SAlvin Zhang
1403a5aeb2b9SAlvin Zhang rctl = IGC_READ_REG(hw, IGC_RCTL);
1404a5aeb2b9SAlvin Zhang rctl &= (~IGC_RCTL_UPE);
1405a5aeb2b9SAlvin Zhang if (dev->data->all_multicast == 1)
1406a5aeb2b9SAlvin Zhang rctl |= IGC_RCTL_MPE;
1407a5aeb2b9SAlvin Zhang else
1408a5aeb2b9SAlvin Zhang rctl &= (~IGC_RCTL_MPE);
1409a5aeb2b9SAlvin Zhang IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1410a5aeb2b9SAlvin Zhang return 0;
1411a5aeb2b9SAlvin Zhang }
1412a5aeb2b9SAlvin Zhang
1413a5aeb2b9SAlvin Zhang static int
1414a5aeb2b9SAlvin Zhang eth_igc_allmulticast_enable(struct rte_eth_dev *dev)
1415a5aeb2b9SAlvin Zhang {
1416a5aeb2b9SAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1417a5aeb2b9SAlvin Zhang uint32_t rctl;
1418a5aeb2b9SAlvin Zhang
1419a5aeb2b9SAlvin Zhang rctl = IGC_READ_REG(hw, IGC_RCTL);
1420a5aeb2b9SAlvin Zhang rctl |= IGC_RCTL_MPE;
1421a5aeb2b9SAlvin Zhang IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1422a5aeb2b9SAlvin Zhang return 0;
1423a5aeb2b9SAlvin Zhang }
1424a5aeb2b9SAlvin Zhang
1425a5aeb2b9SAlvin Zhang static int
1426a5aeb2b9SAlvin Zhang eth_igc_allmulticast_disable(struct rte_eth_dev *dev)
1427a5aeb2b9SAlvin Zhang {
1428a5aeb2b9SAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1429a5aeb2b9SAlvin Zhang uint32_t rctl;
1430a5aeb2b9SAlvin Zhang
1431a5aeb2b9SAlvin Zhang if (dev->data->promiscuous == 1)
1432a5aeb2b9SAlvin Zhang return 0; /* must remain in all_multicast mode */
1433a5aeb2b9SAlvin Zhang
1434a5aeb2b9SAlvin Zhang rctl = IGC_READ_REG(hw, IGC_RCTL);
1435a5aeb2b9SAlvin Zhang rctl &= (~IGC_RCTL_MPE);
1436a5aeb2b9SAlvin Zhang IGC_WRITE_REG(hw, IGC_RCTL, rctl);
143766fde1b9SAlvin Zhang return 0;
143866fde1b9SAlvin Zhang }
143966fde1b9SAlvin Zhang
144066fde1b9SAlvin Zhang static int
14414f09bc55SAlvin Zhang eth_igc_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
14424f09bc55SAlvin Zhang size_t fw_size)
14434f09bc55SAlvin Zhang {
14444f09bc55SAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
14454f09bc55SAlvin Zhang struct igc_fw_version fw;
14464f09bc55SAlvin Zhang int ret;
14474f09bc55SAlvin Zhang
14484f09bc55SAlvin Zhang igc_get_fw_version(hw, &fw);
14494f09bc55SAlvin Zhang
14504f09bc55SAlvin Zhang /* if option rom is valid, display its version too */
14514f09bc55SAlvin Zhang if (fw.or_valid) {
14524f09bc55SAlvin Zhang ret = snprintf(fw_version, fw_size,
14534f09bc55SAlvin Zhang "%d.%d, 0x%08x, %d.%d.%d",
14544f09bc55SAlvin Zhang fw.eep_major, fw.eep_minor, fw.etrack_id,
14554f09bc55SAlvin Zhang fw.or_major, fw.or_build, fw.or_patch);
14564f09bc55SAlvin Zhang /* no option rom */
14574f09bc55SAlvin Zhang } else {
14584f09bc55SAlvin Zhang if (fw.etrack_id != 0x0000) {
14594f09bc55SAlvin Zhang ret = snprintf(fw_version, fw_size,
14604f09bc55SAlvin Zhang "%d.%d, 0x%08x",
14614f09bc55SAlvin Zhang fw.eep_major, fw.eep_minor,
14624f09bc55SAlvin Zhang fw.etrack_id);
14634f09bc55SAlvin Zhang } else {
14644f09bc55SAlvin Zhang ret = snprintf(fw_version, fw_size,
14654f09bc55SAlvin Zhang "%d.%d.%d",
14664f09bc55SAlvin Zhang fw.eep_major, fw.eep_minor,
14674f09bc55SAlvin Zhang fw.eep_build);
14684f09bc55SAlvin Zhang }
14694f09bc55SAlvin Zhang }
1470d345d6c9SFerruh Yigit if (ret < 0)
1471d345d6c9SFerruh Yigit return -EINVAL;
14724f09bc55SAlvin Zhang
14734f09bc55SAlvin Zhang ret += 1; /* add the size of '\0' */
1474d345d6c9SFerruh Yigit if (fw_size < (size_t)ret)
14754f09bc55SAlvin Zhang return ret;
14764f09bc55SAlvin Zhang else
14774f09bc55SAlvin Zhang return 0;
14784f09bc55SAlvin Zhang }
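/*
 * Illustrative usage (not part of the driver): the snprintf()-style
 * contract above matches rte_eth_dev_fw_version_get(): 0 means the string
 * fit, and a positive return is the buffer size that would be needed.
 *
 *	char fw[64];
 *	if (rte_eth_dev_fw_version_get(0, fw, sizeof(fw)) == 0)
 *		printf("firmware: %s\n", fw);
 */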
14794f09bc55SAlvin Zhang
14804f09bc55SAlvin Zhang static int
148166fde1b9SAlvin Zhang eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
148266fde1b9SAlvin Zhang {
14834f09bc55SAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
14844f09bc55SAlvin Zhang
14854f09bc55SAlvin Zhang dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
14864f09bc55SAlvin Zhang dev_info->max_rx_pktlen = MAX_RX_JUMBO_FRAME_SIZE;
14874f09bc55SAlvin Zhang dev_info->max_mac_addrs = hw->mac.rar_entry_count;
14882fe6f1b7SDmitry Kozlyuk dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
1489a5aeb2b9SAlvin Zhang dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
1490a5aeb2b9SAlvin Zhang dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
1491295968d1SFerruh Yigit dev_info->rx_queue_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1492a5aeb2b9SAlvin Zhang
149366fde1b9SAlvin Zhang dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
149466fde1b9SAlvin Zhang dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
14954f09bc55SAlvin Zhang dev_info->max_vmdq_pools = 0;
14964f09bc55SAlvin Zhang
1497a5aeb2b9SAlvin Zhang dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
1498295968d1SFerruh Yigit dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
1499a5aeb2b9SAlvin Zhang dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;
1500a5aeb2b9SAlvin Zhang
1501a5aeb2b9SAlvin Zhang dev_info->default_rxconf = (struct rte_eth_rxconf) {
1502a5aeb2b9SAlvin Zhang .rx_thresh = {
1503a5aeb2b9SAlvin Zhang .pthresh = IGC_DEFAULT_RX_PTHRESH,
1504a5aeb2b9SAlvin Zhang .hthresh = IGC_DEFAULT_RX_HTHRESH,
1505a5aeb2b9SAlvin Zhang .wthresh = IGC_DEFAULT_RX_WTHRESH,
1506a5aeb2b9SAlvin Zhang },
1507a5aeb2b9SAlvin Zhang .rx_free_thresh = IGC_DEFAULT_RX_FREE_THRESH,
1508a5aeb2b9SAlvin Zhang .rx_drop_en = 0,
1509a5aeb2b9SAlvin Zhang .offloads = 0,
1510a5aeb2b9SAlvin Zhang };
1511a5aeb2b9SAlvin Zhang
1512a5aeb2b9SAlvin Zhang dev_info->default_txconf = (struct rte_eth_txconf) {
1513a5aeb2b9SAlvin Zhang .tx_thresh = {
1514a5aeb2b9SAlvin Zhang .pthresh = IGC_DEFAULT_TX_PTHRESH,
1515a5aeb2b9SAlvin Zhang .hthresh = IGC_DEFAULT_TX_HTHRESH,
1516a5aeb2b9SAlvin Zhang .wthresh = IGC_DEFAULT_TX_WTHRESH,
1517a5aeb2b9SAlvin Zhang },
1518a5aeb2b9SAlvin Zhang .offloads = 0,
1519a5aeb2b9SAlvin Zhang };
1520a5aeb2b9SAlvin Zhang
1521a5aeb2b9SAlvin Zhang dev_info->rx_desc_lim = rx_desc_lim;
1522a5aeb2b9SAlvin Zhang dev_info->tx_desc_lim = tx_desc_lim;
1523a5aeb2b9SAlvin Zhang
1524295968d1SFerruh Yigit dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
1525295968d1SFerruh Yigit RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
1526295968d1SFerruh Yigit RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G;
15274f09bc55SAlvin Zhang
15284f09bc55SAlvin Zhang dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
15294f09bc55SAlvin Zhang dev_info->min_mtu = RTE_ETHER_MIN_MTU;
153066fde1b9SAlvin Zhang return 0;
153166fde1b9SAlvin Zhang }
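/*
 * Illustrative usage (not part of the driver): applications reach this
 * through rte_eth_dev_info_get(), e.g. to check for 2.5G support on a
 * hypothetical port 0:
 *
 *	struct rte_eth_dev_info info;
 *	if (rte_eth_dev_info_get(0, &info) == 0 &&
 *			(info.speed_capa & RTE_ETH_LINK_SPEED_2_5G))
 *		printf("2.5G capable\n");
 */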
153266fde1b9SAlvin Zhang
153366fde1b9SAlvin Zhang static int
15344f09bc55SAlvin Zhang eth_igc_led_on(struct rte_eth_dev *dev)
15354f09bc55SAlvin Zhang {
15364f09bc55SAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
15374f09bc55SAlvin Zhang
15384f09bc55SAlvin Zhang return igc_led_on(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
15394f09bc55SAlvin Zhang }
15404f09bc55SAlvin Zhang
15414f09bc55SAlvin Zhang static int
15424f09bc55SAlvin Zhang eth_igc_led_off(struct rte_eth_dev *dev)
15434f09bc55SAlvin Zhang {
15444f09bc55SAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
15454f09bc55SAlvin Zhang
15464f09bc55SAlvin Zhang return igc_led_off(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
15474f09bc55SAlvin Zhang }
15484f09bc55SAlvin Zhang
1549a5aeb2b9SAlvin Zhang static const uint32_t *
1550a5aeb2b9SAlvin Zhang eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev)
155166fde1b9SAlvin Zhang {
1552a5aeb2b9SAlvin Zhang static const uint32_t ptypes[] = {
1553a5aeb2b9SAlvin Zhang /* refers to rx_desc_pkt_info_to_pkt_type() */
1554a5aeb2b9SAlvin Zhang RTE_PTYPE_L2_ETHER,
1555a5aeb2b9SAlvin Zhang RTE_PTYPE_L3_IPV4,
1556a5aeb2b9SAlvin Zhang RTE_PTYPE_L3_IPV4_EXT,
1557a5aeb2b9SAlvin Zhang RTE_PTYPE_L3_IPV6,
1558a5aeb2b9SAlvin Zhang RTE_PTYPE_L3_IPV6_EXT,
1559a5aeb2b9SAlvin Zhang RTE_PTYPE_L4_TCP,
1560a5aeb2b9SAlvin Zhang RTE_PTYPE_L4_UDP,
1561a5aeb2b9SAlvin Zhang RTE_PTYPE_L4_SCTP,
1562a5aeb2b9SAlvin Zhang RTE_PTYPE_TUNNEL_IP,
1563a5aeb2b9SAlvin Zhang RTE_PTYPE_INNER_L3_IPV6,
1564a5aeb2b9SAlvin Zhang RTE_PTYPE_INNER_L3_IPV6_EXT,
1565a5aeb2b9SAlvin Zhang RTE_PTYPE_INNER_L4_TCP,
1566a5aeb2b9SAlvin Zhang RTE_PTYPE_INNER_L4_UDP,
1567a5aeb2b9SAlvin Zhang RTE_PTYPE_UNKNOWN
1568a5aeb2b9SAlvin Zhang };
1569a5aeb2b9SAlvin Zhang
1570a5aeb2b9SAlvin Zhang return ptypes;
1571a5aeb2b9SAlvin Zhang }
1572a5aeb2b9SAlvin Zhang
1573a5aeb2b9SAlvin Zhang static int
1574a5aeb2b9SAlvin Zhang eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1575a5aeb2b9SAlvin Zhang {
1576a5aeb2b9SAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1577a5aeb2b9SAlvin Zhang uint32_t frame_size = mtu + IGC_ETH_OVERHEAD;
1578a5aeb2b9SAlvin Zhang uint32_t rctl;
1579a5aeb2b9SAlvin Zhang
1580a5aeb2b9SAlvin Zhang /* if extended VLAN has been enabled */
1581a5aeb2b9SAlvin Zhang if (IGC_READ_REG(hw, IGC_CTRL_EXT) & IGC_CTRL_EXT_EXT_VLAN)
1582a5aeb2b9SAlvin Zhang frame_size += VLAN_TAG_SIZE;
1583a5aeb2b9SAlvin Zhang
1584a5aeb2b9SAlvin Zhang /*
15855db232faSDapeng Yu * If the device is started, refuse an mtu that requires scattered
15865db232faSDapeng Yu * packet support when this feature has not been enabled before.
1587a5aeb2b9SAlvin Zhang */
15885db232faSDapeng Yu if (dev->data->dev_started && !dev->data->scattered_rx &&
15895db232faSDapeng Yu frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
15905db232faSDapeng Yu PMD_INIT_LOG(ERR, "Stop port first.");
1591a5aeb2b9SAlvin Zhang return -EINVAL;
15925db232faSDapeng Yu }
1593a5aeb2b9SAlvin Zhang
1594a5aeb2b9SAlvin Zhang rctl = IGC_READ_REG(hw, IGC_RCTL);
1595dd4e429cSFerruh Yigit if (mtu > RTE_ETHER_MTU)
1596a5aeb2b9SAlvin Zhang rctl |= IGC_RCTL_LPE;
1597dd4e429cSFerruh Yigit else
1598a5aeb2b9SAlvin Zhang rctl &= ~IGC_RCTL_LPE;
1599a5aeb2b9SAlvin Zhang IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1600a5aeb2b9SAlvin Zhang
16011bb4a528SFerruh Yigit IGC_WRITE_REG(hw, IGC_RLPML, frame_size);
1602a5aeb2b9SAlvin Zhang
160366fde1b9SAlvin Zhang return 0;
160466fde1b9SAlvin Zhang }
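/*
 * Illustrative usage (not part of the driver): jumbo frames are enabled by
 * requesting an MTU above RTE_ETHER_MTU, which sets RCTL.LPE and the RLPML
 * limit above, e.g. on a hypothetical port 0 that is not yet started:
 *
 *	int ret = rte_eth_dev_set_mtu(0, 9000);
 */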
160566fde1b9SAlvin Zhang
160666fde1b9SAlvin Zhang static int
1607a5aeb2b9SAlvin Zhang eth_igc_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1608a5aeb2b9SAlvin Zhang uint32_t index, uint32_t pool)
160966fde1b9SAlvin Zhang {
1610a5aeb2b9SAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1611a5aeb2b9SAlvin Zhang
1612a5aeb2b9SAlvin Zhang igc_rar_set(hw, mac_addr->addr_bytes, index);
1613a5aeb2b9SAlvin Zhang RTE_SET_USED(pool);
161466fde1b9SAlvin Zhang return 0;
161566fde1b9SAlvin Zhang }
161666fde1b9SAlvin Zhang
1617a5aeb2b9SAlvin Zhang static void
1618a5aeb2b9SAlvin Zhang eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index)
16194f09bc55SAlvin Zhang {
1620a5aeb2b9SAlvin Zhang uint8_t addr[RTE_ETHER_ADDR_LEN];
1621a5aeb2b9SAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1622a5aeb2b9SAlvin Zhang
1623a5aeb2b9SAlvin Zhang memset(addr, 0, sizeof(addr));
1624a5aeb2b9SAlvin Zhang igc_rar_set(hw, addr, index);
16254f09bc55SAlvin Zhang }
16264f09bc55SAlvin Zhang
1627a5aeb2b9SAlvin Zhang static int
1628a5aeb2b9SAlvin Zhang eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
1629a5aeb2b9SAlvin Zhang struct rte_ether_addr *addr)
16304f09bc55SAlvin Zhang {
1631a5aeb2b9SAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1632a5aeb2b9SAlvin Zhang igc_rar_set(hw, addr->addr_bytes, 0);
1633a5aeb2b9SAlvin Zhang return 0;
1634a5aeb2b9SAlvin Zhang }
1635a5aeb2b9SAlvin Zhang
1636a5aeb2b9SAlvin Zhang static int
1637a5aeb2b9SAlvin Zhang eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
1638a5aeb2b9SAlvin Zhang struct rte_ether_addr *mc_addr_set,
1639a5aeb2b9SAlvin Zhang uint32_t nb_mc_addr)
1640a5aeb2b9SAlvin Zhang {
1641a5aeb2b9SAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1642a5aeb2b9SAlvin Zhang igc_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
1643a5aeb2b9SAlvin Zhang return 0;
16444f09bc55SAlvin Zhang }
16454f09bc55SAlvin Zhang
1646e6defdfdSAlvin Zhang /*
1647e6defdfdSAlvin Zhang * Read hardware statistics registers
1648e6defdfdSAlvin Zhang */
1649e6defdfdSAlvin Zhang static void
1650e6defdfdSAlvin Zhang igc_read_stats_registers(struct igc_hw *hw, struct igc_hw_stats *stats)
1651e6defdfdSAlvin Zhang {
1652e6defdfdSAlvin Zhang int pause_frames;
1653e6defdfdSAlvin Zhang
1654e6defdfdSAlvin Zhang uint64_t old_gprc = stats->gprc;
1655e6defdfdSAlvin Zhang uint64_t old_gptc = stats->gptc;
1656e6defdfdSAlvin Zhang uint64_t old_tpr = stats->tpr;
1657e6defdfdSAlvin Zhang uint64_t old_tpt = stats->tpt;
1658e6defdfdSAlvin Zhang uint64_t old_rpthc = stats->rpthc;
1659e6defdfdSAlvin Zhang uint64_t old_hgptc = stats->hgptc;
1660e6defdfdSAlvin Zhang
1661e6defdfdSAlvin Zhang stats->crcerrs += IGC_READ_REG(hw, IGC_CRCERRS);
1662e6defdfdSAlvin Zhang stats->algnerrc += IGC_READ_REG(hw, IGC_ALGNERRC);
1663e6defdfdSAlvin Zhang stats->rxerrc += IGC_READ_REG(hw, IGC_RXERRC);
1664e6defdfdSAlvin Zhang stats->mpc += IGC_READ_REG(hw, IGC_MPC);
1665e6defdfdSAlvin Zhang stats->scc += IGC_READ_REG(hw, IGC_SCC);
1666e6defdfdSAlvin Zhang stats->ecol += IGC_READ_REG(hw, IGC_ECOL);
1667e6defdfdSAlvin Zhang
1668e6defdfdSAlvin Zhang stats->mcc += IGC_READ_REG(hw, IGC_MCC);
1669e6defdfdSAlvin Zhang stats->latecol += IGC_READ_REG(hw, IGC_LATECOL);
1670e6defdfdSAlvin Zhang stats->colc += IGC_READ_REG(hw, IGC_COLC);
1671e6defdfdSAlvin Zhang
1672e6defdfdSAlvin Zhang stats->dc += IGC_READ_REG(hw, IGC_DC);
1673e6defdfdSAlvin Zhang stats->tncrs += IGC_READ_REG(hw, IGC_TNCRS);
1674e6defdfdSAlvin Zhang stats->htdpmc += IGC_READ_REG(hw, IGC_HTDPMC);
1675e6defdfdSAlvin Zhang stats->rlec += IGC_READ_REG(hw, IGC_RLEC);
1676e6defdfdSAlvin Zhang stats->xonrxc += IGC_READ_REG(hw, IGC_XONRXC);
1677e6defdfdSAlvin Zhang stats->xontxc += IGC_READ_REG(hw, IGC_XONTXC);
1678e6defdfdSAlvin Zhang
1679e6defdfdSAlvin Zhang /*
1680e6defdfdSAlvin Zhang * For watchdog management we need to know if we have been
1681e6defdfdSAlvin Zhang * paused during the last interval, so capture that here.
1682e6defdfdSAlvin Zhang */
1683e6defdfdSAlvin Zhang pause_frames = IGC_READ_REG(hw, IGC_XOFFRXC);
1684e6defdfdSAlvin Zhang stats->xoffrxc += pause_frames;
1685e6defdfdSAlvin Zhang stats->xofftxc += IGC_READ_REG(hw, IGC_XOFFTXC);
1686e6defdfdSAlvin Zhang stats->fcruc += IGC_READ_REG(hw, IGC_FCRUC);
1687e6defdfdSAlvin Zhang stats->prc64 += IGC_READ_REG(hw, IGC_PRC64);
1688e6defdfdSAlvin Zhang stats->prc127 += IGC_READ_REG(hw, IGC_PRC127);
1689e6defdfdSAlvin Zhang stats->prc255 += IGC_READ_REG(hw, IGC_PRC255);
1690e6defdfdSAlvin Zhang stats->prc511 += IGC_READ_REG(hw, IGC_PRC511);
1691e6defdfdSAlvin Zhang stats->prc1023 += IGC_READ_REG(hw, IGC_PRC1023);
1692e6defdfdSAlvin Zhang stats->prc1522 += IGC_READ_REG(hw, IGC_PRC1522);
1693e6defdfdSAlvin Zhang stats->gprc += IGC_READ_REG(hw, IGC_GPRC);
1694e6defdfdSAlvin Zhang stats->bprc += IGC_READ_REG(hw, IGC_BPRC);
1695e6defdfdSAlvin Zhang stats->mprc += IGC_READ_REG(hw, IGC_MPRC);
1696e6defdfdSAlvin Zhang stats->gptc += IGC_READ_REG(hw, IGC_GPTC);
1697e6defdfdSAlvin Zhang
1698e6defdfdSAlvin Zhang /* For the 64-bit byte counters the low dword must be read first. */
1699e6defdfdSAlvin Zhang /* Both registers clear on the read of the high dword */
1700e6defdfdSAlvin Zhang
1701e6defdfdSAlvin Zhang /* Workaround CRC bytes included in size, take away 4 bytes/packet */
1702e6defdfdSAlvin Zhang stats->gorc += IGC_READ_REG(hw, IGC_GORCL);
1703e6defdfdSAlvin Zhang stats->gorc += ((uint64_t)IGC_READ_REG(hw, IGC_GORCH) << 32);
1704e6defdfdSAlvin Zhang stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN;
1705e6defdfdSAlvin Zhang stats->gotc += IGC_READ_REG(hw, IGC_GOTCL);
1706e6defdfdSAlvin Zhang stats->gotc += ((uint64_t)IGC_READ_REG(hw, IGC_GOTCH) << 32);
1707e6defdfdSAlvin Zhang stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN;
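/*
 * Worked example (illustrative): if 100 good packets were received
 * since the previous read, the hardware byte counters include
 * 100 * RTE_ETHER_CRC_LEN = 400 CRC bytes, which the subtractions
 * above remove from gorc and gotc.
 */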
1708e6defdfdSAlvin Zhang
1709e6defdfdSAlvin Zhang stats->rnbc += IGC_READ_REG(hw, IGC_RNBC);
1710e6defdfdSAlvin Zhang stats->ruc += IGC_READ_REG(hw, IGC_RUC);
1711e6defdfdSAlvin Zhang stats->rfc += IGC_READ_REG(hw, IGC_RFC);
1712e6defdfdSAlvin Zhang stats->roc += IGC_READ_REG(hw, IGC_ROC);
1713e6defdfdSAlvin Zhang stats->rjc += IGC_READ_REG(hw, IGC_RJC);
1714e6defdfdSAlvin Zhang
1715e6defdfdSAlvin Zhang stats->mgprc += IGC_READ_REG(hw, IGC_MGTPRC);
1716e6defdfdSAlvin Zhang stats->mgpdc += IGC_READ_REG(hw, IGC_MGTPDC);
1717e6defdfdSAlvin Zhang stats->mgptc += IGC_READ_REG(hw, IGC_MGTPTC);
1718e6defdfdSAlvin Zhang stats->b2ospc += IGC_READ_REG(hw, IGC_B2OSPC);
1719e6defdfdSAlvin Zhang stats->b2ogprc += IGC_READ_REG(hw, IGC_B2OGPRC);
1720e6defdfdSAlvin Zhang stats->o2bgptc += IGC_READ_REG(hw, IGC_O2BGPTC);
1721e6defdfdSAlvin Zhang stats->o2bspc += IGC_READ_REG(hw, IGC_O2BSPC);
1722e6defdfdSAlvin Zhang
1723e6defdfdSAlvin Zhang stats->tpr += IGC_READ_REG(hw, IGC_TPR);
1724e6defdfdSAlvin Zhang stats->tpt += IGC_READ_REG(hw, IGC_TPT);
1725e6defdfdSAlvin Zhang
1726e6defdfdSAlvin Zhang stats->tor += IGC_READ_REG(hw, IGC_TORL);
1727e6defdfdSAlvin Zhang stats->tor += ((uint64_t)IGC_READ_REG(hw, IGC_TORH) << 32);
1728e6defdfdSAlvin Zhang stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
1729e6defdfdSAlvin Zhang stats->tot += IGC_READ_REG(hw, IGC_TOTL);
1730e6defdfdSAlvin Zhang stats->tot += ((uint64_t)IGC_READ_REG(hw, IGC_TOTH) << 32);
1731e6defdfdSAlvin Zhang stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN;
1732e6defdfdSAlvin Zhang
1733e6defdfdSAlvin Zhang stats->ptc64 += IGC_READ_REG(hw, IGC_PTC64);
1734e6defdfdSAlvin Zhang stats->ptc127 += IGC_READ_REG(hw, IGC_PTC127);
1735e6defdfdSAlvin Zhang stats->ptc255 += IGC_READ_REG(hw, IGC_PTC255);
1736e6defdfdSAlvin Zhang stats->ptc511 += IGC_READ_REG(hw, IGC_PTC511);
1737e6defdfdSAlvin Zhang stats->ptc1023 += IGC_READ_REG(hw, IGC_PTC1023);
1738e6defdfdSAlvin Zhang stats->ptc1522 += IGC_READ_REG(hw, IGC_PTC1522);
1739e6defdfdSAlvin Zhang stats->mptc += IGC_READ_REG(hw, IGC_MPTC);
1740e6defdfdSAlvin Zhang stats->bptc += IGC_READ_REG(hw, IGC_BPTC);
1741e6defdfdSAlvin Zhang stats->tsctc += IGC_READ_REG(hw, IGC_TSCTC);
1742e6defdfdSAlvin Zhang
1743e6defdfdSAlvin Zhang stats->iac += IGC_READ_REG(hw, IGC_IAC);
1744e6defdfdSAlvin Zhang stats->rpthc += IGC_READ_REG(hw, IGC_RPTHC);
1745e6defdfdSAlvin Zhang stats->hgptc += IGC_READ_REG(hw, IGC_HGPTC);
1746e6defdfdSAlvin Zhang stats->icrxdmtc += IGC_READ_REG(hw, IGC_ICRXDMTC);
1747e6defdfdSAlvin Zhang
1748e6defdfdSAlvin Zhang /* Host to Card Statistics */
1749e6defdfdSAlvin Zhang stats->hgorc += IGC_READ_REG(hw, IGC_HGORCL);
1750e6defdfdSAlvin Zhang stats->hgorc += ((uint64_t)IGC_READ_REG(hw, IGC_HGORCH) << 32);
1751e6defdfdSAlvin Zhang stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN;
1752e6defdfdSAlvin Zhang stats->hgotc += IGC_READ_REG(hw, IGC_HGOTCL);
1753e6defdfdSAlvin Zhang stats->hgotc += ((uint64_t)IGC_READ_REG(hw, IGC_HGOTCH) << 32);
1754e6defdfdSAlvin Zhang stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN;
1755e6defdfdSAlvin Zhang stats->lenerrs += IGC_READ_REG(hw, IGC_LENERRS);
1756e6defdfdSAlvin Zhang }
1757e6defdfdSAlvin Zhang
1758e6defdfdSAlvin Zhang /*
1759e6defdfdSAlvin Zhang * Write 0 to all queue status registers
1760e6defdfdSAlvin Zhang */
1761e6defdfdSAlvin Zhang static void
1762e6defdfdSAlvin Zhang igc_reset_queue_stats_register(struct igc_hw *hw)
1763e6defdfdSAlvin Zhang {
1764e6defdfdSAlvin Zhang int i;
1765e6defdfdSAlvin Zhang
1766e6defdfdSAlvin Zhang for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
1767e6defdfdSAlvin Zhang IGC_WRITE_REG(hw, IGC_PQGPRC(i), 0);
1768e6defdfdSAlvin Zhang IGC_WRITE_REG(hw, IGC_PQGPTC(i), 0);
1769e6defdfdSAlvin Zhang IGC_WRITE_REG(hw, IGC_PQGORC(i), 0);
1770e6defdfdSAlvin Zhang IGC_WRITE_REG(hw, IGC_PQGOTC(i), 0);
1771e6defdfdSAlvin Zhang IGC_WRITE_REG(hw, IGC_PQMPRC(i), 0);
1772e6defdfdSAlvin Zhang IGC_WRITE_REG(hw, IGC_RQDPC(i), 0);
1773e6defdfdSAlvin Zhang IGC_WRITE_REG(hw, IGC_TQDPC(i), 0);
1774e6defdfdSAlvin Zhang }
1775e6defdfdSAlvin Zhang }
1776e6defdfdSAlvin Zhang
1777e6defdfdSAlvin Zhang /*
1778e6defdfdSAlvin Zhang * Read all hardware queue status registers
1779e6defdfdSAlvin Zhang */
1780e6defdfdSAlvin Zhang static void
1781e6defdfdSAlvin Zhang igc_read_queue_stats_register(struct rte_eth_dev *dev)
1782e6defdfdSAlvin Zhang {
1783e6defdfdSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1784e6defdfdSAlvin Zhang struct igc_hw_queue_stats *queue_stats =
1785e6defdfdSAlvin Zhang IGC_DEV_PRIVATE_QUEUE_STATS(dev);
1786e6defdfdSAlvin Zhang int i;
1787e6defdfdSAlvin Zhang
1788e6defdfdSAlvin Zhang /*
1789e6defdfdSAlvin Zhang * These registers are not cleared on read. Furthermore, they wrap
1790e6defdfdSAlvin Zhang * around back to 0x00000000 on the next increment after reaching
1791e6defdfdSAlvin Zhang * 0xFFFFFFFF and then continue normal count operation.
1792e6defdfdSAlvin Zhang */
1793e6defdfdSAlvin Zhang for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
1794e6defdfdSAlvin Zhang union {
1795e6defdfdSAlvin Zhang u64 ddword;
1796e6defdfdSAlvin Zhang u32 dword[2];
1797e6defdfdSAlvin Zhang } value;
1798e6defdfdSAlvin Zhang u32 tmp;
1799e6defdfdSAlvin Zhang
1800e6defdfdSAlvin Zhang /*
1801e6defdfdSAlvin Zhang * Read the register first; if the value is smaller than the
1802e6defdfdSAlvin Zhang * previous read, the register has wrapped around, so increment
1803e6defdfdSAlvin Zhang * the high 4 bytes by 1 and replace the low 4 bytes with the
1804e6defdfdSAlvin Zhang * new value.
1805e6defdfdSAlvin Zhang */
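/*
 * Worked example (illustrative): with a stored 64-bit total of
 * 0x1FFFFFFF0 the low dword is 0xFFFFFFF0; a new read of
 * 0x00000010 is smaller, so the high dword becomes 2 and the
 * merged total is 0x200000010.
 */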
1806e6defdfdSAlvin Zhang tmp = IGC_READ_REG(hw, IGC_PQGPRC(i));
1807e6defdfdSAlvin Zhang value.ddword = queue_stats->pqgprc[i];
1808e6defdfdSAlvin Zhang if (value.dword[U32_0_IN_U64] > tmp)
1809e6defdfdSAlvin Zhang value.dword[U32_1_IN_U64]++;
1810e6defdfdSAlvin Zhang value.dword[U32_0_IN_U64] = tmp;
1811e6defdfdSAlvin Zhang queue_stats->pqgprc[i] = value.ddword;
1812e6defdfdSAlvin Zhang
1813e6defdfdSAlvin Zhang tmp = IGC_READ_REG(hw, IGC_PQGPTC(i));
1814e6defdfdSAlvin Zhang value.ddword = queue_stats->pqgptc[i];
1815e6defdfdSAlvin Zhang if (value.dword[U32_0_IN_U64] > tmp)
1816e6defdfdSAlvin Zhang value.dword[U32_1_IN_U64]++;
1817e6defdfdSAlvin Zhang value.dword[U32_0_IN_U64] = tmp;
1818e6defdfdSAlvin Zhang queue_stats->pqgptc[i] = value.ddword;
1819e6defdfdSAlvin Zhang
1820e6defdfdSAlvin Zhang tmp = IGC_READ_REG(hw, IGC_PQGORC(i));
1821e6defdfdSAlvin Zhang value.ddword = queue_stats->pqgorc[i];
1822e6defdfdSAlvin Zhang if (value.dword[U32_0_IN_U64] > tmp)
1823e6defdfdSAlvin Zhang value.dword[U32_1_IN_U64]++;
1824e6defdfdSAlvin Zhang value.dword[U32_0_IN_U64] = tmp;
1825e6defdfdSAlvin Zhang queue_stats->pqgorc[i] = value.ddword;
1826e6defdfdSAlvin Zhang
1827e6defdfdSAlvin Zhang tmp = IGC_READ_REG(hw, IGC_PQGOTC(i));
1828e6defdfdSAlvin Zhang value.ddword = queue_stats->pqgotc[i];
1829e6defdfdSAlvin Zhang if (value.dword[U32_0_IN_U64] > tmp)
1830e6defdfdSAlvin Zhang value.dword[U32_1_IN_U64]++;
1831e6defdfdSAlvin Zhang value.dword[U32_0_IN_U64] = tmp;
1832e6defdfdSAlvin Zhang queue_stats->pqgotc[i] = value.ddword;
1833e6defdfdSAlvin Zhang
1834e6defdfdSAlvin Zhang tmp = IGC_READ_REG(hw, IGC_PQMPRC(i));
1835e6defdfdSAlvin Zhang value.ddword = queue_stats->pqmprc[i];
1836e6defdfdSAlvin Zhang if (value.dword[U32_0_IN_U64] > tmp)
1837e6defdfdSAlvin Zhang value.dword[U32_1_IN_U64]++;
1838e6defdfdSAlvin Zhang value.dword[U32_0_IN_U64] = tmp;
1839e6defdfdSAlvin Zhang queue_stats->pqmprc[i] = value.ddword;
1840e6defdfdSAlvin Zhang
1841e6defdfdSAlvin Zhang tmp = IGC_READ_REG(hw, IGC_RQDPC(i));
1842e6defdfdSAlvin Zhang value.ddword = queue_stats->rqdpc[i];
1843e6defdfdSAlvin Zhang if (value.dword[U32_0_IN_U64] > tmp)
1844e6defdfdSAlvin Zhang value.dword[U32_1_IN_U64]++;
1845e6defdfdSAlvin Zhang value.dword[U32_0_IN_U64] = tmp;
1846e6defdfdSAlvin Zhang queue_stats->rqdpc[i] = value.ddword;
1847e6defdfdSAlvin Zhang
1848e6defdfdSAlvin Zhang tmp = IGC_READ_REG(hw, IGC_TQDPC(i));
1849e6defdfdSAlvin Zhang value.ddword = queue_stats->tqdpc[i];
1850e6defdfdSAlvin Zhang if (value.dword[U32_0_IN_U64] > tmp)
1851e6defdfdSAlvin Zhang value.dword[U32_1_IN_U64]++;
1852e6defdfdSAlvin Zhang value.dword[U32_0_IN_U64] = tmp;
1853e6defdfdSAlvin Zhang queue_stats->tqdpc[i] = value.ddword;
1854e6defdfdSAlvin Zhang }
1855e6defdfdSAlvin Zhang }
1856e6defdfdSAlvin Zhang
1857e6defdfdSAlvin Zhang static int
1858e6defdfdSAlvin Zhang eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1859e6defdfdSAlvin Zhang {
1860e6defdfdSAlvin Zhang struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
1861e6defdfdSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1862e6defdfdSAlvin Zhang struct igc_hw_stats *stats = IGC_DEV_PRIVATE_STATS(dev);
1863e6defdfdSAlvin Zhang struct igc_hw_queue_stats *queue_stats =
1864e6defdfdSAlvin Zhang IGC_DEV_PRIVATE_QUEUE_STATS(dev);
1865e6defdfdSAlvin Zhang int i;
1866e6defdfdSAlvin Zhang
1867e6defdfdSAlvin Zhang /*
1868e6defdfdSAlvin Zhang * Cancel the stats handler since it also reads the queue status registers
1869e6defdfdSAlvin Zhang */
1870e6defdfdSAlvin Zhang rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);
1871e6defdfdSAlvin Zhang
1872e6defdfdSAlvin Zhang /* Read status register */
1873e6defdfdSAlvin Zhang igc_read_queue_stats_register(dev);
1874e6defdfdSAlvin Zhang igc_read_stats_registers(hw, stats);
1875e6defdfdSAlvin Zhang
1876e6defdfdSAlvin Zhang if (rte_stats == NULL) {
1877e6defdfdSAlvin Zhang /* Restart queue status handler */
1878e6defdfdSAlvin Zhang rte_eal_alarm_set(IGC_ALARM_INTERVAL,
1879e6defdfdSAlvin Zhang igc_update_queue_stats_handler, dev);
1880e6defdfdSAlvin Zhang return -EINVAL;
1881e6defdfdSAlvin Zhang }
1882e6defdfdSAlvin Zhang
1883e6defdfdSAlvin Zhang /* Rx Errors */
1884e6defdfdSAlvin Zhang rte_stats->imissed = stats->mpc;
1885c69abf62SAlvin Zhang rte_stats->ierrors = stats->crcerrs + stats->rlec +
1886e6defdfdSAlvin Zhang stats->rxerrc + stats->algnerrc;
1887e6defdfdSAlvin Zhang
1888e6defdfdSAlvin Zhang /* Tx Errors */
1889e6defdfdSAlvin Zhang rte_stats->oerrors = stats->ecol + stats->latecol;
1890e6defdfdSAlvin Zhang
1891e6defdfdSAlvin Zhang rte_stats->ipackets = stats->gprc;
1892e6defdfdSAlvin Zhang rte_stats->opackets = stats->gptc;
1893e6defdfdSAlvin Zhang rte_stats->ibytes = stats->gorc;
1894e6defdfdSAlvin Zhang rte_stats->obytes = stats->gotc;
1895e6defdfdSAlvin Zhang
1896e6defdfdSAlvin Zhang /* Get per-queue statistics */
1897e6defdfdSAlvin Zhang for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
1898e6defdfdSAlvin Zhang /* Get TX queue statistics */
1899e6defdfdSAlvin Zhang int map_id = igc->txq_stats_map[i];
1900e6defdfdSAlvin Zhang if (map_id >= 0) {
1901e6defdfdSAlvin Zhang rte_stats->q_opackets[map_id] += queue_stats->pqgptc[i];
1902e6defdfdSAlvin Zhang rte_stats->q_obytes[map_id] += queue_stats->pqgotc[i];
1903e6defdfdSAlvin Zhang }
1904e6defdfdSAlvin Zhang /* Get RX queue statistics */
1905e6defdfdSAlvin Zhang map_id = igc->rxq_stats_map[i];
1906e6defdfdSAlvin Zhang if (map_id >= 0) {
1907e6defdfdSAlvin Zhang rte_stats->q_ipackets[map_id] += queue_stats->pqgprc[i];
1908e6defdfdSAlvin Zhang rte_stats->q_ibytes[map_id] += queue_stats->pqgorc[i];
1909e6defdfdSAlvin Zhang rte_stats->q_errors[map_id] += queue_stats->rqdpc[i];
1910e6defdfdSAlvin Zhang }
1911e6defdfdSAlvin Zhang }
1912e6defdfdSAlvin Zhang
1913e6defdfdSAlvin Zhang /* Restart queue status handler */
1914e6defdfdSAlvin Zhang rte_eal_alarm_set(IGC_ALARM_INTERVAL,
1915e6defdfdSAlvin Zhang igc_update_queue_stats_handler, dev);
1916e6defdfdSAlvin Zhang return 0;
1917e6defdfdSAlvin Zhang }
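/*
 * Illustrative usage (not part of the driver), for a hypothetical port 0:
 *
 *	struct rte_eth_stats st;
 *	if (rte_eth_stats_get(0, &st) == 0)
 *		printf("rx %" PRIu64 " tx %" PRIu64 "\n",
 *			st.ipackets, st.opackets);
 *
 * The per-queue q_ipackets/q_opackets slots are filled only for queues
 * mapped through eth_igc_queue_stats_mapping_set() below.
 */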
1918e6defdfdSAlvin Zhang
1919e6defdfdSAlvin Zhang static int
1920e6defdfdSAlvin Zhang eth_igc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1921e6defdfdSAlvin Zhang unsigned int n)
1922e6defdfdSAlvin Zhang {
1923e6defdfdSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1924e6defdfdSAlvin Zhang struct igc_hw_stats *hw_stats =
1925e6defdfdSAlvin Zhang IGC_DEV_PRIVATE_STATS(dev);
1926e6defdfdSAlvin Zhang unsigned int i;
1927e6defdfdSAlvin Zhang
1928e6defdfdSAlvin Zhang igc_read_stats_registers(hw, hw_stats);
1929e6defdfdSAlvin Zhang
1930e6defdfdSAlvin Zhang if (n < IGC_NB_XSTATS)
1931e6defdfdSAlvin Zhang return IGC_NB_XSTATS;
1932e6defdfdSAlvin Zhang
1933e6defdfdSAlvin Zhang /* If this is a reset, xstats is NULL and we have already cleared
1934e6defdfdSAlvin Zhang * the registers by reading them.
1935e6defdfdSAlvin Zhang */
1936e6defdfdSAlvin Zhang if (!xstats)
1937e6defdfdSAlvin Zhang return 0;
1938e6defdfdSAlvin Zhang
1939e6defdfdSAlvin Zhang /* Extended stats */
1940e6defdfdSAlvin Zhang for (i = 0; i < IGC_NB_XSTATS; i++) {
1941e6defdfdSAlvin Zhang xstats[i].id = i;
1942e6defdfdSAlvin Zhang xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
1943e6defdfdSAlvin Zhang rte_igc_stats_strings[i].offset);
1944e6defdfdSAlvin Zhang }
1945e6defdfdSAlvin Zhang
1946e6defdfdSAlvin Zhang return IGC_NB_XSTATS;
1947e6defdfdSAlvin Zhang }
1948e6defdfdSAlvin Zhang
1949e6defdfdSAlvin Zhang static int
1950e6defdfdSAlvin Zhang eth_igc_xstats_reset(struct rte_eth_dev *dev)
1951e6defdfdSAlvin Zhang {
1952e6defdfdSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1953e6defdfdSAlvin Zhang struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
1954e6defdfdSAlvin Zhang struct igc_hw_queue_stats *queue_stats =
1955e6defdfdSAlvin Zhang IGC_DEV_PRIVATE_QUEUE_STATS(dev);
1956e6defdfdSAlvin Zhang
1957e6defdfdSAlvin Zhang /* Cancel the queue stats handler to avoid conflicts */
1958e6defdfdSAlvin Zhang rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);
1959e6defdfdSAlvin Zhang
1960e6defdfdSAlvin Zhang /* HW registers are cleared on read */
1961e6defdfdSAlvin Zhang igc_reset_queue_stats_register(hw);
1962e6defdfdSAlvin Zhang igc_read_stats_registers(hw, hw_stats);
1963e6defdfdSAlvin Zhang
1964e6defdfdSAlvin Zhang /* Reset software totals */
1965e6defdfdSAlvin Zhang memset(hw_stats, 0, sizeof(*hw_stats));
1966e6defdfdSAlvin Zhang memset(queue_stats, 0, sizeof(*queue_stats));
1967e6defdfdSAlvin Zhang
1968e6defdfdSAlvin Zhang /* Restart the queue status handler */
1969e6defdfdSAlvin Zhang rte_eal_alarm_set(IGC_ALARM_INTERVAL, igc_update_queue_stats_handler,
1970e6defdfdSAlvin Zhang dev);
1971e6defdfdSAlvin Zhang
1972e6defdfdSAlvin Zhang return 0;
1973e6defdfdSAlvin Zhang }
1974e6defdfdSAlvin Zhang
1975e6defdfdSAlvin Zhang static int
1976e6defdfdSAlvin Zhang eth_igc_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1977e6defdfdSAlvin Zhang struct rte_eth_xstat_name *xstats_names, unsigned int size)
1978e6defdfdSAlvin Zhang {
1979e6defdfdSAlvin Zhang unsigned int i;
1980e6defdfdSAlvin Zhang
1981e6defdfdSAlvin Zhang if (xstats_names == NULL)
1982e6defdfdSAlvin Zhang return IGC_NB_XSTATS;
1983e6defdfdSAlvin Zhang
1984e6defdfdSAlvin Zhang if (size < IGC_NB_XSTATS) {
1985e6defdfdSAlvin Zhang PMD_DRV_LOG(ERR, "not enough buffers!");
1986e6defdfdSAlvin Zhang return IGC_NB_XSTATS;
1987e6defdfdSAlvin Zhang }
1988e6defdfdSAlvin Zhang
1989e6defdfdSAlvin Zhang for (i = 0; i < IGC_NB_XSTATS; i++)
1990e6defdfdSAlvin Zhang strlcpy(xstats_names[i].name, rte_igc_stats_strings[i].name,
1991e6defdfdSAlvin Zhang sizeof(xstats_names[i].name));
1992e6defdfdSAlvin Zhang
1993e6defdfdSAlvin Zhang return IGC_NB_XSTATS;
1994e6defdfdSAlvin Zhang }
1995e6defdfdSAlvin Zhang
1996e6defdfdSAlvin Zhang static int
1997e6defdfdSAlvin Zhang eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
19988c9f976fSAndrew Rybchenko const uint64_t *ids, struct rte_eth_xstat_name *xstats_names,
1999e6defdfdSAlvin Zhang unsigned int limit)
2000e6defdfdSAlvin Zhang {
2001e6defdfdSAlvin Zhang unsigned int i;
2002e6defdfdSAlvin Zhang
2003e6defdfdSAlvin Zhang if (!ids)
2004e6defdfdSAlvin Zhang return eth_igc_xstats_get_names(dev, xstats_names, limit);
2005e6defdfdSAlvin Zhang
2006e6defdfdSAlvin Zhang for (i = 0; i < limit; i++) {
2007e6defdfdSAlvin Zhang if (ids[i] >= IGC_NB_XSTATS) {
2008e6defdfdSAlvin Zhang PMD_DRV_LOG(ERR, "id value isn't valid");
2009e6defdfdSAlvin Zhang return -EINVAL;
2010e6defdfdSAlvin Zhang }
2011e6defdfdSAlvin Zhang strlcpy(xstats_names[i].name,
2012e6defdfdSAlvin Zhang rte_igc_stats_strings[ids[i]].name,
2013e6defdfdSAlvin Zhang sizeof(xstats_names[i].name));
2014e6defdfdSAlvin Zhang }
2015e6defdfdSAlvin Zhang return limit;
2016e6defdfdSAlvin Zhang }
2017e6defdfdSAlvin Zhang
2018e6defdfdSAlvin Zhang static int
2019e6defdfdSAlvin Zhang eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
2020e6defdfdSAlvin Zhang uint64_t *values, unsigned int n)
2021e6defdfdSAlvin Zhang {
2022e6defdfdSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2023e6defdfdSAlvin Zhang struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
2024e6defdfdSAlvin Zhang unsigned int i;
2025e6defdfdSAlvin Zhang
2026e6defdfdSAlvin Zhang igc_read_stats_registers(hw, hw_stats);
2027e6defdfdSAlvin Zhang
2028e6defdfdSAlvin Zhang if (!ids) {
2029e6defdfdSAlvin Zhang if (n < IGC_NB_XSTATS)
2030e6defdfdSAlvin Zhang return IGC_NB_XSTATS;
2031e6defdfdSAlvin Zhang
2032e6defdfdSAlvin Zhang /* If this is a reset, values is NULL and we have already cleared
2033e6defdfdSAlvin Zhang * the registers by reading them.
2034e6defdfdSAlvin Zhang */
2035e6defdfdSAlvin Zhang if (!values)
2036e6defdfdSAlvin Zhang return 0;
2037e6defdfdSAlvin Zhang
2038e6defdfdSAlvin Zhang /* Extended stats */
2039e6defdfdSAlvin Zhang for (i = 0; i < IGC_NB_XSTATS; i++)
2040e6defdfdSAlvin Zhang values[i] = *(uint64_t *)(((char *)hw_stats) +
2041e6defdfdSAlvin Zhang rte_igc_stats_strings[i].offset);
2042e6defdfdSAlvin Zhang
2043e6defdfdSAlvin Zhang return IGC_NB_XSTATS;
2044e6defdfdSAlvin Zhang
2045e6defdfdSAlvin Zhang } else {
2046e6defdfdSAlvin Zhang for (i = 0; i < n; i++) {
2047e6defdfdSAlvin Zhang if (ids[i] >= IGC_NB_XSTATS) {
2048e6defdfdSAlvin Zhang PMD_DRV_LOG(ERR, "id value isn't valid");
2049e6defdfdSAlvin Zhang return -EINVAL;
2050e6defdfdSAlvin Zhang }
2051e6defdfdSAlvin Zhang values[i] = *(uint64_t *)(((char *)hw_stats) +
2052e6defdfdSAlvin Zhang rte_igc_stats_strings[ids[i]].offset);
2053e6defdfdSAlvin Zhang }
2054e6defdfdSAlvin Zhang return n;
2055e6defdfdSAlvin Zhang }
2056e6defdfdSAlvin Zhang }
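/*
 * Illustrative usage (not part of the driver): fetch a single counter by
 * id, assuming index 0 was first looked up by name with
 * rte_eth_xstats_get_names(), on a hypothetical port 0:
 *
 *	uint64_t id = 0, value;
 *	if (rte_eth_xstats_get_by_id(0, &id, &value, 1) == 1)
 *		printf("xstat[0] = %" PRIu64 "\n", value);
 */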
2057e6defdfdSAlvin Zhang
2058e6defdfdSAlvin Zhang static int
2059e6defdfdSAlvin Zhang eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
2060e6defdfdSAlvin Zhang uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx)
2061e6defdfdSAlvin Zhang {
2062e6defdfdSAlvin Zhang struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
2063e6defdfdSAlvin Zhang
2064e6defdfdSAlvin Zhang /* check queue id is valid */
2065e6defdfdSAlvin Zhang if (queue_id >= IGC_QUEUE_PAIRS_NUM) {
2066e6defdfdSAlvin Zhang PMD_DRV_LOG(ERR, "queue id(%u) error, max is %u",
2067e6defdfdSAlvin Zhang queue_id, IGC_QUEUE_PAIRS_NUM - 1);
2068e6defdfdSAlvin Zhang return -EINVAL;
2069e6defdfdSAlvin Zhang }
2070e6defdfdSAlvin Zhang
2071e6defdfdSAlvin Zhang /* store the queue's stat index in the mapping */
2072e6defdfdSAlvin Zhang if (is_rx)
2073e6defdfdSAlvin Zhang igc->rxq_stats_map[queue_id] = stat_idx;
2074e6defdfdSAlvin Zhang else
2075e6defdfdSAlvin Zhang igc->txq_stats_map[queue_id] = stat_idx;
2076e6defdfdSAlvin Zhang
2077e6defdfdSAlvin Zhang return 0;
2078e6defdfdSAlvin Zhang }
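/*
 * Illustrative usage (not part of the driver): map RX queue 1 of a
 * hypothetical port 0 onto stats index 0, so that its counters appear in
 * rte_eth_stats.q_ipackets[0]/q_ibytes[0]:
 *
 *	rte_eth_dev_set_rx_queue_stats_mapping(0, 1, 0);
 */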
2079e6defdfdSAlvin Zhang
208066fde1b9SAlvin Zhang static int
20819417098fSAlvin Zhang eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
20829417098fSAlvin Zhang {
20839417098fSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
20849417098fSAlvin Zhang struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2085d61138d4SHarman Kalra struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
20869417098fSAlvin Zhang uint32_t vec = IGC_MISC_VEC_ID;
20879417098fSAlvin Zhang
20889417098fSAlvin Zhang if (rte_intr_allow_others(intr_handle))
20899417098fSAlvin Zhang vec = IGC_RX_VEC_START;
20909417098fSAlvin Zhang
20919417098fSAlvin Zhang uint32_t mask = 1u << (queue_id + vec);
20929417098fSAlvin Zhang
20939417098fSAlvin Zhang IGC_WRITE_REG(hw, IGC_EIMC, mask);
20949417098fSAlvin Zhang IGC_WRITE_FLUSH(hw);
20959417098fSAlvin Zhang
20969417098fSAlvin Zhang return 0;
20979417098fSAlvin Zhang }
20989417098fSAlvin Zhang
20999417098fSAlvin Zhang static int
21009417098fSAlvin Zhang eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
21019417098fSAlvin Zhang {
21029417098fSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
21039417098fSAlvin Zhang struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2104d61138d4SHarman Kalra struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
21059417098fSAlvin Zhang uint32_t vec = IGC_MISC_VEC_ID;
21069417098fSAlvin Zhang
21079417098fSAlvin Zhang if (rte_intr_allow_others(intr_handle))
21089417098fSAlvin Zhang vec = IGC_RX_VEC_START;
21099417098fSAlvin Zhang
21109417098fSAlvin Zhang uint32_t mask = 1u << (queue_id + vec);
21119417098fSAlvin Zhang
21129417098fSAlvin Zhang IGC_WRITE_REG(hw, IGC_EIMS, mask);
21139417098fSAlvin Zhang IGC_WRITE_FLUSH(hw);
21149417098fSAlvin Zhang
21159417098fSAlvin Zhang rte_intr_enable(intr_handle);
21169417098fSAlvin Zhang
21179417098fSAlvin Zhang return 0;
21189417098fSAlvin Zhang }
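/*
 * Worked example (illustrative): when MSI-X provides a separate "other"
 * vector, queue 0 maps to vector IGC_RX_VEC_START, so the mask written to
 * EIMS/EIMC above is 1u << (0 + IGC_RX_VEC_START).
 */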
21199417098fSAlvin Zhang
21209417098fSAlvin Zhang static int
21210d415cd8SAlvin Zhang eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
21220d415cd8SAlvin Zhang {
21230d415cd8SAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
21240d415cd8SAlvin Zhang uint32_t ctrl;
21250d415cd8SAlvin Zhang int tx_pause;
21260d415cd8SAlvin Zhang int rx_pause;
21270d415cd8SAlvin Zhang
21280d415cd8SAlvin Zhang fc_conf->pause_time = hw->fc.pause_time;
21290d415cd8SAlvin Zhang fc_conf->high_water = hw->fc.high_water;
21300d415cd8SAlvin Zhang fc_conf->low_water = hw->fc.low_water;
21310d415cd8SAlvin Zhang fc_conf->send_xon = hw->fc.send_xon;
21320d415cd8SAlvin Zhang fc_conf->autoneg = hw->mac.autoneg;
21330d415cd8SAlvin Zhang
21340d415cd8SAlvin Zhang /*
21350d415cd8SAlvin Zhang * Return rx_pause and tx_pause status according to actual setting of
21360d415cd8SAlvin Zhang * the TFCE and RFCE bits in the CTRL register.
21370d415cd8SAlvin Zhang */
21380d415cd8SAlvin Zhang ctrl = IGC_READ_REG(hw, IGC_CTRL);
21390d415cd8SAlvin Zhang if (ctrl & IGC_CTRL_TFCE)
21400d415cd8SAlvin Zhang tx_pause = 1;
21410d415cd8SAlvin Zhang else
21420d415cd8SAlvin Zhang tx_pause = 0;
21430d415cd8SAlvin Zhang
21440d415cd8SAlvin Zhang if (ctrl & IGC_CTRL_RFCE)
21450d415cd8SAlvin Zhang rx_pause = 1;
21460d415cd8SAlvin Zhang else
21470d415cd8SAlvin Zhang rx_pause = 0;
21480d415cd8SAlvin Zhang
21490d415cd8SAlvin Zhang if (rx_pause && tx_pause)
2150295968d1SFerruh Yigit fc_conf->mode = RTE_ETH_FC_FULL;
21510d415cd8SAlvin Zhang else if (rx_pause)
2152295968d1SFerruh Yigit fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
21530d415cd8SAlvin Zhang else if (tx_pause)
2154295968d1SFerruh Yigit fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
21550d415cd8SAlvin Zhang else
2156295968d1SFerruh Yigit fc_conf->mode = RTE_ETH_FC_NONE;
21570d415cd8SAlvin Zhang
21580d415cd8SAlvin Zhang return 0;
21590d415cd8SAlvin Zhang }
21600d415cd8SAlvin Zhang
21610d415cd8SAlvin Zhang static int
21620d415cd8SAlvin Zhang eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
21630d415cd8SAlvin Zhang {
21640d415cd8SAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
21650d415cd8SAlvin Zhang uint32_t rx_buf_size;
21660d415cd8SAlvin Zhang uint32_t max_high_water;
21670d415cd8SAlvin Zhang uint32_t rctl;
21680d415cd8SAlvin Zhang int err;
21690d415cd8SAlvin Zhang
21700d415cd8SAlvin Zhang if (fc_conf->autoneg != hw->mac.autoneg)
21710d415cd8SAlvin Zhang return -ENOTSUP;
21720d415cd8SAlvin Zhang
21730d415cd8SAlvin Zhang rx_buf_size = igc_get_rx_buffer_size(hw);
21740d415cd8SAlvin Zhang PMD_DRV_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
21750d415cd8SAlvin Zhang
21760d415cd8SAlvin Zhang /* Reserve at least one full-size Ethernet frame for the high watermark */
21770d415cd8SAlvin Zhang max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
21780d415cd8SAlvin Zhang if (fc_conf->high_water > max_high_water ||
21790d415cd8SAlvin Zhang fc_conf->high_water < fc_conf->low_water) {
21800d415cd8SAlvin Zhang PMD_DRV_LOG(ERR,
21810d415cd8SAlvin Zhang "Incorrect high(%u)/low(%u) water value, max is %u",
21820d415cd8SAlvin Zhang fc_conf->high_water, fc_conf->low_water,
21830d415cd8SAlvin Zhang max_high_water);
21840d415cd8SAlvin Zhang return -EINVAL;
21850d415cd8SAlvin Zhang }
21860d415cd8SAlvin Zhang
21870d415cd8SAlvin Zhang switch (fc_conf->mode) {
2188295968d1SFerruh Yigit case RTE_ETH_FC_NONE:
21890d415cd8SAlvin Zhang hw->fc.requested_mode = igc_fc_none;
21900d415cd8SAlvin Zhang break;
2191295968d1SFerruh Yigit case RTE_ETH_FC_RX_PAUSE:
21920d415cd8SAlvin Zhang hw->fc.requested_mode = igc_fc_rx_pause;
21930d415cd8SAlvin Zhang break;
2194295968d1SFerruh Yigit case RTE_ETH_FC_TX_PAUSE:
21950d415cd8SAlvin Zhang hw->fc.requested_mode = igc_fc_tx_pause;
21960d415cd8SAlvin Zhang break;
2197295968d1SFerruh Yigit case RTE_ETH_FC_FULL:
21980d415cd8SAlvin Zhang hw->fc.requested_mode = igc_fc_full;
21990d415cd8SAlvin Zhang break;
22000d415cd8SAlvin Zhang default:
22010d415cd8SAlvin Zhang PMD_DRV_LOG(ERR, "unsupported fc mode: %u", fc_conf->mode);
22020d415cd8SAlvin Zhang return -EINVAL;
22030d415cd8SAlvin Zhang }
22040d415cd8SAlvin Zhang
22050d415cd8SAlvin Zhang hw->fc.pause_time = fc_conf->pause_time;
22060d415cd8SAlvin Zhang hw->fc.high_water = fc_conf->high_water;
22070d415cd8SAlvin Zhang hw->fc.low_water = fc_conf->low_water;
22080d415cd8SAlvin Zhang hw->fc.send_xon = fc_conf->send_xon;
22090d415cd8SAlvin Zhang
22100d415cd8SAlvin Zhang err = igc_setup_link_generic(hw);
22110d415cd8SAlvin Zhang if (err == IGC_SUCCESS) {
22120d415cd8SAlvin Zhang /**
22130d415cd8SAlvin Zhang * check whether MAC control frames should be forwarded - the
22140d415cd8SAlvin Zhang * driver has no native capability to do that, so we write the
22150d415cd8SAlvin Zhang * registers ourselves
22160d415cd8SAlvin Zhang **/
22170d415cd8SAlvin Zhang rctl = IGC_READ_REG(hw, IGC_RCTL);
22180d415cd8SAlvin Zhang
22190d415cd8SAlvin Zhang /* set or clear MFLCN.PMCF bit depending on configuration */
22200d415cd8SAlvin Zhang if (fc_conf->mac_ctrl_frame_fwd != 0)
22210d415cd8SAlvin Zhang rctl |= IGC_RCTL_PMCF;
22220d415cd8SAlvin Zhang else
22230d415cd8SAlvin Zhang rctl &= ~IGC_RCTL_PMCF;
22240d415cd8SAlvin Zhang
22250d415cd8SAlvin Zhang IGC_WRITE_REG(hw, IGC_RCTL, rctl);
22260d415cd8SAlvin Zhang IGC_WRITE_FLUSH(hw);
22270d415cd8SAlvin Zhang
22280d415cd8SAlvin Zhang return 0;
22290d415cd8SAlvin Zhang }
22300d415cd8SAlvin Zhang
22310d415cd8SAlvin Zhang PMD_DRV_LOG(ERR, "igc_setup_link_generic = 0x%x", err);
22320d415cd8SAlvin Zhang return -EIO;
22330d415cd8SAlvin Zhang }
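/*
 * Usage sketch (illustrative only, not part of the driver): the
 * get/set handlers above map to rte_eth_dev_flow_ctrl_get() and
 * rte_eth_dev_flow_ctrl_set(). A get-modify-set sequence keeps the
 * watermarks and autoneg setting the PMD already derived (the set
 * handler rejects a changed autoneg value) while switching the mode;
 * "port_id" is assumed to be a valid igc port:
 *
 *	struct rte_eth_fc_conf fc_conf;
 *
 *	if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) == 0) {
 *		fc_conf.mode = RTE_ETH_FC_FULL;
 *		(void)rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 *	}
 */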
22340d415cd8SAlvin Zhang
22350d415cd8SAlvin Zhang static int
2236bd3fcf0dSAlvin Zhang eth_igc_rss_reta_update(struct rte_eth_dev *dev,
2237bd3fcf0dSAlvin Zhang struct rte_eth_rss_reta_entry64 *reta_conf,
2238bd3fcf0dSAlvin Zhang uint16_t reta_size)
2239bd3fcf0dSAlvin Zhang {
2240bd3fcf0dSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2241bd3fcf0dSAlvin Zhang uint16_t i;
2242bd3fcf0dSAlvin Zhang
2243295968d1SFerruh Yigit if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2244bd3fcf0dSAlvin Zhang PMD_DRV_LOG(ERR,
2245bd3fcf0dSAlvin Zhang "The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)",
2246295968d1SFerruh Yigit reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2247bd3fcf0dSAlvin Zhang return -EINVAL;
2248bd3fcf0dSAlvin Zhang }
2249bd3fcf0dSAlvin Zhang
2250295968d1SFerruh Yigit RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
225123d8a664SAlvin Zhang
2252bd3fcf0dSAlvin Zhang /* set redirection table */
2253295968d1SFerruh Yigit for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
2254bd3fcf0dSAlvin Zhang union igc_rss_reta_reg reta, reg;
2255bd3fcf0dSAlvin Zhang uint16_t idx, shift;
2256bd3fcf0dSAlvin Zhang uint8_t j, mask;
2257bd3fcf0dSAlvin Zhang
2258295968d1SFerruh Yigit idx = i / RTE_ETH_RETA_GROUP_SIZE;
2259295968d1SFerruh Yigit shift = i % RTE_ETH_RETA_GROUP_SIZE;
2260bd3fcf0dSAlvin Zhang mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2261bd3fcf0dSAlvin Zhang IGC_RSS_RDT_REG_SIZE_MASK);
2262bd3fcf0dSAlvin Zhang
2263bd3fcf0dSAlvin Zhang /* skip the register if there is nothing to update in it */
226423d8a664SAlvin Zhang if (!mask ||
2265295968d1SFerruh Yigit shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
2266bd3fcf0dSAlvin Zhang continue;
2267bd3fcf0dSAlvin Zhang
2268bd3fcf0dSAlvin Zhang /* a partial mask means the current register value must be read first */
2269bd3fcf0dSAlvin Zhang if (mask == IGC_RSS_RDT_REG_SIZE_MASK)
2270bd3fcf0dSAlvin Zhang reg.dword = 0;
2271bd3fcf0dSAlvin Zhang else
2272bd3fcf0dSAlvin Zhang reg.dword = IGC_READ_REG_LE_VALUE(hw,
2273bd3fcf0dSAlvin Zhang IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
2274bd3fcf0dSAlvin Zhang
2275bd3fcf0dSAlvin Zhang /* update the register */
227623d8a664SAlvin Zhang RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
2277bd3fcf0dSAlvin Zhang for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
2278bd3fcf0dSAlvin Zhang if (mask & (1u << j))
2279bd3fcf0dSAlvin Zhang reta.bytes[j] =
2280bd3fcf0dSAlvin Zhang (uint8_t)reta_conf[idx].reta[shift + j];
2281bd3fcf0dSAlvin Zhang else
2282bd3fcf0dSAlvin Zhang reta.bytes[j] = reg.bytes[j];
2283bd3fcf0dSAlvin Zhang }
2284bd3fcf0dSAlvin Zhang IGC_WRITE_REG_LE_VALUE(hw,
2285bd3fcf0dSAlvin Zhang IGC_RETA(i / IGC_RSS_RDT_REG_SIZE), reta.dword);
2286bd3fcf0dSAlvin Zhang }
2287bd3fcf0dSAlvin Zhang
2288bd3fcf0dSAlvin Zhang return 0;
2289bd3fcf0dSAlvin Zhang }
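/*
 * Usage sketch (illustrative only, not part of the driver): the RETA
 * update handler above requires reta_size to be exactly
 * RTE_ETH_RSS_RETA_SIZE_128 on this hardware. Steering all 128
 * entries to queue 0, with "port_id" assumed to be a valid igc port:
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[2];
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	reta_conf[0].mask = UINT64_MAX;
 *	reta_conf[1].mask = UINT64_MAX;
 *	(void)rte_eth_dev_rss_reta_update(port_id, reta_conf,
 *			RTE_ETH_RSS_RETA_SIZE_128);
 *
 * Both masks are fully set so all 128 entries are written; reta_conf
 * was zeroed, so every entry points at queue 0.
 */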
2290bd3fcf0dSAlvin Zhang
2291bd3fcf0dSAlvin Zhang static int
2292bd3fcf0dSAlvin Zhang eth_igc_rss_reta_query(struct rte_eth_dev *dev,
2293bd3fcf0dSAlvin Zhang struct rte_eth_rss_reta_entry64 *reta_conf,
2294bd3fcf0dSAlvin Zhang uint16_t reta_size)
2295bd3fcf0dSAlvin Zhang {
2296bd3fcf0dSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2297bd3fcf0dSAlvin Zhang uint16_t i;
2298bd3fcf0dSAlvin Zhang
2299295968d1SFerruh Yigit if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2300bd3fcf0dSAlvin Zhang PMD_DRV_LOG(ERR,
2301bd3fcf0dSAlvin Zhang "The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)",
2302295968d1SFerruh Yigit reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2303bd3fcf0dSAlvin Zhang return -EINVAL;
2304bd3fcf0dSAlvin Zhang }
2305bd3fcf0dSAlvin Zhang
2306295968d1SFerruh Yigit RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
230723d8a664SAlvin Zhang
2308bd3fcf0dSAlvin Zhang /* read redirection table */
2309295968d1SFerruh Yigit for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
2310bd3fcf0dSAlvin Zhang union igc_rss_reta_reg reta;
2311bd3fcf0dSAlvin Zhang uint16_t idx, shift;
2312bd3fcf0dSAlvin Zhang uint8_t j, mask;
2313bd3fcf0dSAlvin Zhang
2314295968d1SFerruh Yigit idx = i / RTE_ETH_RETA_GROUP_SIZE;
2315295968d1SFerruh Yigit shift = i % RTE_ETH_RETA_GROUP_SIZE;
2316bd3fcf0dSAlvin Zhang mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2317bd3fcf0dSAlvin Zhang IGC_RSS_RDT_REG_SIZE_MASK);
2318bd3fcf0dSAlvin Zhang
2319bd3fcf0dSAlvin Zhang /* skip the register if none of its entries are requested */
232023d8a664SAlvin Zhang if (!mask ||
2321295968d1SFerruh Yigit shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
2322bd3fcf0dSAlvin Zhang continue;
2323bd3fcf0dSAlvin Zhang
2324bd3fcf0dSAlvin Zhang /* read register and get the queue index */
232523d8a664SAlvin Zhang RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
2326bd3fcf0dSAlvin Zhang reta.dword = IGC_READ_REG_LE_VALUE(hw,
2327bd3fcf0dSAlvin Zhang IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
2328bd3fcf0dSAlvin Zhang for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
2329bd3fcf0dSAlvin Zhang if (mask & (1u << j))
2330bd3fcf0dSAlvin Zhang reta_conf[idx].reta[shift + j] = reta.bytes[j];
2331bd3fcf0dSAlvin Zhang }
2332bd3fcf0dSAlvin Zhang }
2333bd3fcf0dSAlvin Zhang
2334bd3fcf0dSAlvin Zhang return 0;
2335bd3fcf0dSAlvin Zhang }
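/*
 * Usage sketch (illustrative only): reading the table back mirrors the
 * update path; only entries whose mask bit is set are filled in by the
 * handler above. With "port_id" assumed valid:
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[2];
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	reta_conf[0].mask = UINT64_MAX;
 *	reta_conf[1].mask = UINT64_MAX;
 *	if (rte_eth_dev_rss_reta_query(port_id, reta_conf,
 *			RTE_ETH_RSS_RETA_SIZE_128) == 0)
 *		... reta_conf[idx].reta[] now holds the queue indices ...
 */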
2336bd3fcf0dSAlvin Zhang
2337bd3fcf0dSAlvin Zhang static int
2338bd3fcf0dSAlvin Zhang eth_igc_rss_hash_update(struct rte_eth_dev *dev,
2339bd3fcf0dSAlvin Zhang struct rte_eth_rss_conf *rss_conf)
2340bd3fcf0dSAlvin Zhang {
2341bd3fcf0dSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2342bd3fcf0dSAlvin Zhang igc_hw_rss_hash_set(hw, rss_conf);
2343bd3fcf0dSAlvin Zhang return 0;
2344bd3fcf0dSAlvin Zhang }
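/*
 * Usage sketch (illustrative only, not part of the driver): the
 * handler above is reached via rte_eth_dev_rss_hash_update(). Passing
 * rss_key = NULL is assumed here to keep the current hardware key and
 * only reprogram the hash-function selection; "port_id" is assumed to
 * be a valid igc port:
 *
 *	struct rte_eth_rss_conf rss_conf = {
 *		.rss_key = NULL,
 *		.rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 *	};
 *
 *	(void)rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 */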
2345bd3fcf0dSAlvin Zhang
2346bd3fcf0dSAlvin Zhang static int
2347bd3fcf0dSAlvin Zhang eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
2348bd3fcf0dSAlvin Zhang struct rte_eth_rss_conf *rss_conf)
2349bd3fcf0dSAlvin Zhang {
2350bd3fcf0dSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2351bd3fcf0dSAlvin Zhang uint32_t *hash_key = (uint32_t *)rss_conf->rss_key;
2352bd3fcf0dSAlvin Zhang uint32_t mrqc;
2353bd3fcf0dSAlvin Zhang uint64_t rss_hf;
2354bd3fcf0dSAlvin Zhang
2355bd3fcf0dSAlvin Zhang if (hash_key != NULL) {
2356bd3fcf0dSAlvin Zhang int i;
2357bd3fcf0dSAlvin Zhang
2358bd3fcf0dSAlvin Zhang /* check the buffer is large enough to store the hash key */
2359bd3fcf0dSAlvin Zhang if (rss_conf->rss_key_len != IGC_HKEY_SIZE) {
2360bd3fcf0dSAlvin Zhang PMD_DRV_LOG(ERR,
2361bd3fcf0dSAlvin Zhang "RSS hash key size %u in parameter doesn't match the hardware hash key size %u",
2362bd3fcf0dSAlvin Zhang rss_conf->rss_key_len, IGC_HKEY_SIZE);
2363bd3fcf0dSAlvin Zhang return -EINVAL;
2364bd3fcf0dSAlvin Zhang }
2365bd3fcf0dSAlvin Zhang
2366bd3fcf0dSAlvin Zhang /* read RSS key from register */
2367bd3fcf0dSAlvin Zhang for (i = 0; i < IGC_HKEY_MAX_INDEX; i++)
2368bd3fcf0dSAlvin Zhang hash_key[i] = IGC_READ_REG_LE_VALUE(hw, IGC_RSSRK(i));
2369bd3fcf0dSAlvin Zhang }
2370bd3fcf0dSAlvin Zhang
2371bd3fcf0dSAlvin Zhang /* get RSS functions configured in MRQC register */
2372bd3fcf0dSAlvin Zhang mrqc = IGC_READ_REG(hw, IGC_MRQC);
2373bd3fcf0dSAlvin Zhang if ((mrqc & IGC_MRQC_ENABLE_RSS_4Q) == 0)
2374bd3fcf0dSAlvin Zhang return 0;
2375bd3fcf0dSAlvin Zhang
2376bd3fcf0dSAlvin Zhang rss_hf = 0;
2377bd3fcf0dSAlvin Zhang if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
2378295968d1SFerruh Yigit rss_hf |= RTE_ETH_RSS_IPV4;
2379bd3fcf0dSAlvin Zhang if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
2380295968d1SFerruh Yigit rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
2381bd3fcf0dSAlvin Zhang if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
2382295968d1SFerruh Yigit rss_hf |= RTE_ETH_RSS_IPV6;
2383bd3fcf0dSAlvin Zhang if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
2384295968d1SFerruh Yigit rss_hf |= RTE_ETH_RSS_IPV6_EX;
2385bd3fcf0dSAlvin Zhang if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
2386295968d1SFerruh Yigit rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
2387bd3fcf0dSAlvin Zhang if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
2388295968d1SFerruh Yigit rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
2389bd3fcf0dSAlvin Zhang if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
2390295968d1SFerruh Yigit rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
2391bd3fcf0dSAlvin Zhang if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
2392295968d1SFerruh Yigit rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
2393bd3fcf0dSAlvin Zhang if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
2394295968d1SFerruh Yigit rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
2395bd3fcf0dSAlvin Zhang
2396bd3fcf0dSAlvin Zhang rss_conf->rss_hf |= rss_hf;
2397bd3fcf0dSAlvin Zhang return 0;
2398bd3fcf0dSAlvin Zhang }
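/*
 * Usage sketch (illustrative only): to read the configuration back,
 * supply a key buffer of exactly IGC_HKEY_SIZE bytes (40 on this
 * device), or pass rss_key = NULL to fetch only the rss_hf flags
 * accumulated by the handler above; "port_id" is assumed valid:
 *
 *	uint8_t key[40];
 *	struct rte_eth_rss_conf rss_conf = {
 *		.rss_key = key,
 *		.rss_key_len = sizeof(key),
 *	};
 *
 *	if (rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf) == 0)
 *		... rss_conf.rss_hf holds the enabled hash functions ...
 */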
2399bd3fcf0dSAlvin Zhang
2400bd3fcf0dSAlvin Zhang static int
24015f266d0dSAlvin Zhang eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
24025f266d0dSAlvin Zhang {
24035f266d0dSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
24045f266d0dSAlvin Zhang struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
24055f266d0dSAlvin Zhang uint32_t vfta;
24065f266d0dSAlvin Zhang uint32_t vid_idx;
24075f266d0dSAlvin Zhang uint32_t vid_bit;
24085f266d0dSAlvin Zhang
24095f266d0dSAlvin Zhang vid_idx = (vlan_id >> IGC_VFTA_ENTRY_SHIFT) & IGC_VFTA_ENTRY_MASK;
24105f266d0dSAlvin Zhang vid_bit = 1u << (vlan_id & IGC_VFTA_ENTRY_BIT_SHIFT_MASK);
24115f266d0dSAlvin Zhang vfta = shadow_vfta->vfta[vid_idx];
24125f266d0dSAlvin Zhang if (on)
24135f266d0dSAlvin Zhang vfta |= vid_bit;
24145f266d0dSAlvin Zhang else
24155f266d0dSAlvin Zhang vfta &= ~vid_bit;
24165f266d0dSAlvin Zhang IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, vid_idx, vfta);
24175f266d0dSAlvin Zhang
24185f266d0dSAlvin Zhang /* update local VFTA copy */
24195f266d0dSAlvin Zhang shadow_vfta->vfta[vid_idx] = vfta;
24205f266d0dSAlvin Zhang
24215f266d0dSAlvin Zhang return 0;
24225f266d0dSAlvin Zhang }
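/*
 * Usage sketch (illustrative only): VFTA entries are toggled through
 * rte_eth_dev_vlan_filter(). With "port_id" assumed to be an igc port
 * that has RTE_ETH_RX_OFFLOAD_VLAN_FILTER enabled, admitting VLAN 100
 * and later dropping it again is simply:
 *
 *	(void)rte_eth_dev_vlan_filter(port_id, 100, 1);
 *	...
 *	(void)rte_eth_dev_vlan_filter(port_id, 100, 0);
 */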
24235f266d0dSAlvin Zhang
24245f266d0dSAlvin Zhang static void
24255f266d0dSAlvin Zhang igc_vlan_hw_filter_disable(struct rte_eth_dev *dev)
24265f266d0dSAlvin Zhang {
24275f266d0dSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
24285f266d0dSAlvin Zhang igc_read_reg_check_clear_bits(hw, IGC_RCTL,
24295f266d0dSAlvin Zhang IGC_RCTL_CFIEN | IGC_RCTL_VFE);
24305f266d0dSAlvin Zhang }
24315f266d0dSAlvin Zhang
24325f266d0dSAlvin Zhang static void
24335f266d0dSAlvin Zhang igc_vlan_hw_filter_enable(struct rte_eth_dev *dev)
24345f266d0dSAlvin Zhang {
24355f266d0dSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
24365f266d0dSAlvin Zhang struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
24375f266d0dSAlvin Zhang uint32_t reg_val;
24385f266d0dSAlvin Zhang int i;
24395f266d0dSAlvin Zhang
24405f266d0dSAlvin Zhang /* Filter Table Enable, CFI not used for packet acceptance */
24415f266d0dSAlvin Zhang reg_val = IGC_READ_REG(hw, IGC_RCTL);
24425f266d0dSAlvin Zhang reg_val &= ~IGC_RCTL_CFIEN;
24435f266d0dSAlvin Zhang reg_val |= IGC_RCTL_VFE;
24445f266d0dSAlvin Zhang IGC_WRITE_REG(hw, IGC_RCTL, reg_val);
24455f266d0dSAlvin Zhang
24465f266d0dSAlvin Zhang /* restore VFTA table */
24475f266d0dSAlvin Zhang for (i = 0; i < IGC_VFTA_SIZE; i++)
24485f266d0dSAlvin Zhang IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, i, shadow_vfta->vfta[i]);
24495f266d0dSAlvin Zhang }
24505f266d0dSAlvin Zhang
24515f266d0dSAlvin Zhang static void
24525f266d0dSAlvin Zhang igc_vlan_hw_strip_disable(struct rte_eth_dev *dev)
24535f266d0dSAlvin Zhang {
24545f266d0dSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
24555f266d0dSAlvin Zhang
24565f266d0dSAlvin Zhang igc_read_reg_check_clear_bits(hw, IGC_CTRL, IGC_CTRL_VME);
24575f266d0dSAlvin Zhang }
24585f266d0dSAlvin Zhang
24595f266d0dSAlvin Zhang static void
24605f266d0dSAlvin Zhang igc_vlan_hw_strip_enable(struct rte_eth_dev *dev)
24615f266d0dSAlvin Zhang {
24625f266d0dSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
24635f266d0dSAlvin Zhang
24645f266d0dSAlvin Zhang igc_read_reg_check_set_bits(hw, IGC_CTRL, IGC_CTRL_VME);
24655f266d0dSAlvin Zhang }
24665f266d0dSAlvin Zhang
24675f266d0dSAlvin Zhang static int
24685f266d0dSAlvin Zhang igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
24695f266d0dSAlvin Zhang {
24705f266d0dSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
24711bb4a528SFerruh Yigit uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
24725f266d0dSAlvin Zhang uint32_t ctrl_ext;
24735f266d0dSAlvin Zhang
24745f266d0dSAlvin Zhang ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
24755f266d0dSAlvin Zhang
24765f266d0dSAlvin Zhang /* nothing to do if extended VLAN is not enabled */
24775f266d0dSAlvin Zhang if ((ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) == 0)
24785f266d0dSAlvin Zhang return 0;
24795f266d0dSAlvin Zhang
24805f266d0dSAlvin Zhang /* Update maximum packet length */
24811bb4a528SFerruh Yigit if (frame_size < RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
24825f266d0dSAlvin Zhang PMD_DRV_LOG(ERR, "Maximum packet length %u error, min is %u",
24831bb4a528SFerruh Yigit frame_size, VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
24845f266d0dSAlvin Zhang return -EINVAL;
24855f266d0dSAlvin Zhang }
24861bb4a528SFerruh Yigit IGC_WRITE_REG(hw, IGC_RLPML, frame_size - VLAN_TAG_SIZE);
24875f266d0dSAlvin Zhang
24885f266d0dSAlvin Zhang IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_EXT_VLAN);
24895f266d0dSAlvin Zhang return 0;
24905f266d0dSAlvin Zhang }
24915f266d0dSAlvin Zhang
24925f266d0dSAlvin Zhang static int
24935f266d0dSAlvin Zhang igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
24945f266d0dSAlvin Zhang {
24955f266d0dSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
24961bb4a528SFerruh Yigit uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
24975f266d0dSAlvin Zhang uint32_t ctrl_ext;
24985f266d0dSAlvin Zhang
24995f266d0dSAlvin Zhang ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
25005f266d0dSAlvin Zhang
25015f266d0dSAlvin Zhang /* nothing to do if extended VLAN is already enabled */
25025f266d0dSAlvin Zhang if (ctrl_ext & IGC_CTRL_EXT_EXT_VLAN)
25035f266d0dSAlvin Zhang return 0;
25045f266d0dSAlvin Zhang
25055f266d0dSAlvin Zhang /* Update maximum packet length */
25061bb4a528SFerruh Yigit if (frame_size > MAX_RX_JUMBO_FRAME_SIZE) {
25075f266d0dSAlvin Zhang PMD_DRV_LOG(ERR, "Maximum packet length %u error, max is %u",
25081bb4a528SFerruh Yigit frame_size, MAX_RX_JUMBO_FRAME_SIZE);
25095f266d0dSAlvin Zhang return -EINVAL;
25105f266d0dSAlvin Zhang }
25111bb4a528SFerruh Yigit IGC_WRITE_REG(hw, IGC_RLPML, frame_size);
25125f266d0dSAlvin Zhang
25135f266d0dSAlvin Zhang IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_EXT_VLAN);
25145f266d0dSAlvin Zhang return 0;
25155f266d0dSAlvin Zhang }
25165f266d0dSAlvin Zhang
25175f266d0dSAlvin Zhang static int
25185f266d0dSAlvin Zhang eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
25195f266d0dSAlvin Zhang {
25205f266d0dSAlvin Zhang struct rte_eth_rxmode *rxmode;
25215f266d0dSAlvin Zhang
25225f266d0dSAlvin Zhang rxmode = &dev->data->dev_conf.rxmode;
2523295968d1SFerruh Yigit if (mask & RTE_ETH_VLAN_STRIP_MASK) {
2524295968d1SFerruh Yigit if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
25255f266d0dSAlvin Zhang igc_vlan_hw_strip_enable(dev);
25265f266d0dSAlvin Zhang else
25275f266d0dSAlvin Zhang igc_vlan_hw_strip_disable(dev);
25285f266d0dSAlvin Zhang }
25295f266d0dSAlvin Zhang
2530295968d1SFerruh Yigit if (mask & RTE_ETH_VLAN_FILTER_MASK) {
2531295968d1SFerruh Yigit if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
25325f266d0dSAlvin Zhang igc_vlan_hw_filter_enable(dev);
25335f266d0dSAlvin Zhang else
25345f266d0dSAlvin Zhang igc_vlan_hw_filter_disable(dev);
25355f266d0dSAlvin Zhang }
25365f266d0dSAlvin Zhang
2537295968d1SFerruh Yigit if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
2538295968d1SFerruh Yigit if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
25395f266d0dSAlvin Zhang return igc_vlan_hw_extend_enable(dev);
25405f266d0dSAlvin Zhang else
25415f266d0dSAlvin Zhang return igc_vlan_hw_extend_disable(dev);
25425f266d0dSAlvin Zhang }
25435f266d0dSAlvin Zhang
25445f266d0dSAlvin Zhang return 0;
25455f266d0dSAlvin Zhang }
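/*
 * Usage sketch (illustrative only, not part of the driver): the
 * mask-driven handler above is reached through
 * rte_eth_dev_set_vlan_offload(), which derives the mask from the
 * bits that differ from the current configuration. Enabling stripping
 * and filtering together on an assumed valid "port_id":
 *
 *	int ret = rte_eth_dev_set_vlan_offload(port_id,
 *			RTE_ETH_VLAN_STRIP_OFFLOAD |
 *			RTE_ETH_VLAN_FILTER_OFFLOAD);
 *
 * The current state can be read back with
 * rte_eth_dev_get_vlan_offload(port_id).
 */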
25465f266d0dSAlvin Zhang
25475f266d0dSAlvin Zhang static int
25485f266d0dSAlvin Zhang eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
25495f266d0dSAlvin Zhang enum rte_vlan_type vlan_type,
25505f266d0dSAlvin Zhang uint16_t tpid)
25515f266d0dSAlvin Zhang {
25525f266d0dSAlvin Zhang struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
25535f266d0dSAlvin Zhang uint32_t reg_val;
25545f266d0dSAlvin Zhang
25555f266d0dSAlvin Zhang /* only the outer TPID of double VLAN can be configured */
2556295968d1SFerruh Yigit if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
25575f266d0dSAlvin Zhang reg_val = IGC_READ_REG(hw, IGC_VET);
25585f266d0dSAlvin Zhang reg_val = (reg_val & (~IGC_VET_EXT)) |
25595f266d0dSAlvin Zhang ((uint32_t)tpid << IGC_VET_EXT_SHIFT);
25605f266d0dSAlvin Zhang IGC_WRITE_REG(hw, IGC_VET, reg_val);
25615f266d0dSAlvin Zhang
25625f266d0dSAlvin Zhang return 0;
25635f266d0dSAlvin Zhang }
25645f266d0dSAlvin Zhang
25655f266d0dSAlvin Zhang /* all other TPID values are read-only */
25665f266d0dSAlvin Zhang PMD_DRV_LOG(ERR, "Not supported");
25675f266d0dSAlvin Zhang return -ENOTSUP;
25685f266d0dSAlvin Zhang }
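/*
 * Usage sketch (illustrative only): since only the outer TPID is
 * writable here, a typical use is selecting 802.1ad (QinQ) as the
 * outer tag protocol on an assumed valid "port_id":
 *
 *	(void)rte_eth_dev_set_vlan_ether_type(port_id,
 *			RTE_ETH_VLAN_TYPE_OUTER, RTE_ETHER_TYPE_QINQ);
 */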
25695f266d0dSAlvin Zhang
25705f266d0dSAlvin Zhang static int
257166fde1b9SAlvin Zhang eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
257266fde1b9SAlvin Zhang struct rte_pci_device *pci_dev)
257366fde1b9SAlvin Zhang {
257466fde1b9SAlvin Zhang PMD_INIT_FUNC_TRACE();
25758cb7c57dSAlvin Zhang return rte_eth_dev_pci_generic_probe(pci_dev,
25768cb7c57dSAlvin Zhang sizeof(struct igc_adapter), eth_igc_dev_init);
257766fde1b9SAlvin Zhang }
257866fde1b9SAlvin Zhang
257966fde1b9SAlvin Zhang static int
258066fde1b9SAlvin Zhang eth_igc_pci_remove(struct rte_pci_device *pci_dev)
258166fde1b9SAlvin Zhang {
258266fde1b9SAlvin Zhang PMD_INIT_FUNC_TRACE();
258366fde1b9SAlvin Zhang return rte_eth_dev_pci_generic_remove(pci_dev, eth_igc_dev_uninit);
258466fde1b9SAlvin Zhang }
258566fde1b9SAlvin Zhang
258666fde1b9SAlvin Zhang static struct rte_pci_driver rte_igc_pmd = {
258766fde1b9SAlvin Zhang .id_table = pci_id_igc_map,
258866fde1b9SAlvin Zhang .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
258966fde1b9SAlvin Zhang .probe = eth_igc_pci_probe,
259066fde1b9SAlvin Zhang .remove = eth_igc_pci_remove,
259166fde1b9SAlvin Zhang };
259266fde1b9SAlvin Zhang
259366fde1b9SAlvin Zhang RTE_PMD_REGISTER_PCI(net_igc, rte_igc_pmd);
259466fde1b9SAlvin Zhang RTE_PMD_REGISTER_PCI_TABLE(net_igc, pci_id_igc_map);
259566fde1b9SAlvin Zhang RTE_PMD_REGISTER_KMOD_DEP(net_igc, "* igb_uio | uio_pci_generic | vfio-pci");
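/*
 * Binding sketch (illustrative only): per the kernel-module dependency
 * declared above, a device must be bound to igb_uio, uio_pci_generic
 * or vfio-pci before this PMD can claim it, e.g. with an assumed PCI
 * address:
 *
 *	dpdk-devbind.py --bind=vfio-pci 0000:01:00.0
 */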