/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */

#include "igc_hw.h"
#include "igc_i225.h"
#include "igc_mac.h"
#include "igc_base.h"
#include "igc_manage.h"

/**
 *  igc_acquire_phy_base - Acquire rights to access PHY
 *  @hw: pointer to the HW structure
 *
 *  Acquire access rights to the correct PHY.
 **/
s32 igc_acquire_phy_base(struct igc_hw *hw)
{
	u16 mask = IGC_SWFW_PHY0_SM;

	DEBUGFUNC("igc_acquire_phy_base");

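	/* Each PCI function owns its own SW/FW semaphore bit; pick the
	 * mask matching hw->bus.func so only this function's PHY is locked.
	 */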
	if (hw->bus.func == IGC_FUNC_1)
		mask = IGC_SWFW_PHY1_SM;
	else if (hw->bus.func == IGC_FUNC_2)
		mask = IGC_SWFW_PHY2_SM;
	else if (hw->bus.func == IGC_FUNC_3)
		mask = IGC_SWFW_PHY3_SM;

	return hw->mac.ops.acquire_swfw_sync(hw, mask);
}

/**
 *  igc_release_phy_base - Release rights to access PHY
 *  @hw: pointer to the HW structure
 *
 *  A wrapper to release access rights to the correct PHY.
 **/
void igc_release_phy_base(struct igc_hw *hw)
{
	u16 mask = IGC_SWFW_PHY0_SM;

	DEBUGFUNC("igc_release_phy_base");

	if (hw->bus.func == IGC_FUNC_1)
		mask = IGC_SWFW_PHY1_SM;
	else if (hw->bus.func == IGC_FUNC_2)
		mask = IGC_SWFW_PHY2_SM;
	else if (hw->bus.func == IGC_FUNC_3)
		mask = IGC_SWFW_PHY3_SM;

	hw->mac.ops.release_swfw_sync(hw, mask);
}
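
/*
 * Illustrative usage (a sketch, not code from this file): callers are
 * expected to bracket raw PHY register access with this acquire/release
 * pair so that software and firmware never touch the PHY concurrently:
 *
 *	if (igc_acquire_phy_base(hw) != IGC_SUCCESS)
 *		return IGC_ERR_SWFW_SYNC;
 *	... access PHY registers ...
 *	igc_release_phy_base(hw);
 */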

/**
 *  igc_init_hw_base - Initialize hardware
 *  @hw: pointer to the HW structure
 *
 *  Initializes the hardware, readying it for operation.
 **/
s32 igc_init_hw_base(struct igc_hw *hw)
{
	struct igc_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	DEBUGFUNC("igc_init_hw_base");

	/* Setup the receive address */
	igc_init_rx_addrs_generic(hw, rar_count);

	/* Zero out the Multicast HASH table */
	DEBUGOUT("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		IGC_WRITE_REG_ARRAY(hw, IGC_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	DEBUGOUT("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		IGC_WRITE_REG_ARRAY(hw, IGC_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = mac->ops.setup_link(hw);
	/*
	 * Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	igc_clear_hw_cntrs_base_generic(hw);

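	/* Report the status of the link and flow control setup above */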
	return ret_val;
}

/**
 * igc_power_down_phy_copper_base - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * Removes the link when the PHY is powered down to save power or to turn
 * off link during a driver unload, provided wake on LAN is not enabled.
 **/
void igc_power_down_phy_copper_base(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;

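	/* Without a check_reset_block hook we cannot confirm it is safe
	 * to power the PHY down, so leave it powered.
	 */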
	if (!(phy->ops.check_reset_block))
		return;

	/* If the management interface is not enabled, then power down */
	if (!phy->ops.check_reset_block(hw))
		igc_power_down_phy_copper(hw);
}

/**
 *  igc_rx_fifo_flush_base - Clean Rx FIFO after Rx enable
 *  @hw: pointer to the HW structure
 *
 *  After Rx enable, if manageability is enabled then there is likely some
 *  bad data at the start of the FIFO and possibly in the DMA FIFO.  This
 *  function clears the FIFOs and flushes any packets that came in as Rx was
 *  being enabled.
 **/
void igc_rx_fifo_flush_base(struct igc_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	DEBUGFUNC("igc_rx_fifo_flush_base");

	/* disable IPv6 options as per hardware errata */
	rfctl = IGC_READ_REG(hw, IGC_RFCTL);
	rfctl |= IGC_RFCTL_IPV6_EX_DIS;
	IGC_WRITE_REG(hw, IGC_RFCTL, rfctl);

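	/* No manageability traffic means no bad data was received while
	 * Rx was being enabled, so there is nothing to flush.
	 */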
	if (!(IGC_READ_REG(hw, IGC_MANC) & IGC_MANC_RCV_TCO_EN))
		return;

	/* Disable all Rx queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = IGC_READ_REG(hw, IGC_RXDCTL(i));
		IGC_WRITE_REG(hw, IGC_RXDCTL(i),
				rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		msec_delay(1);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= IGC_READ_REG(hw, IGC_RXDCTL(i));
		if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		DEBUGOUT("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Set enable and wait 2ms so that
	 * any packet that was arriving while RCTL.EN was set is flushed
	 */
	IGC_WRITE_REG(hw, IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF);

	rlpml = IGC_READ_REG(hw, IGC_RLPML);
	IGC_WRITE_REG(hw, IGC_RLPML, 0);

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP);
	temp_rctl |= IGC_RCTL_LPE;

	IGC_WRITE_REG(hw, IGC_RCTL, temp_rctl);
	IGC_WRITE_REG(hw, IGC_RCTL, temp_rctl | IGC_RCTL_EN);
	IGC_WRITE_FLUSH(hw);
	msec_delay(2);

	/* Enable Rx queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl[i]);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	IGC_WRITE_FLUSH(hw);

	IGC_WRITE_REG(hw, IGC_RLPML, rlpml);
	IGC_WRITE_REG(hw, IGC_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
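	/* ROC, RNBC and MPC are clear-on-read counters; reading them
	 * discards the error counts accumulated during the flush.
	 */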
	IGC_READ_REG(hw, IGC_ROC);
	IGC_READ_REG(hw, IGC_RNBC);
	IGC_READ_REG(hw, IGC_MPC);
}
191