xref: /freebsd-12.1/sys/dev/ixgbe/if_ix.c (revision 6e69a11a)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 
40 #include "ixgbe.h"
41 #include "ixgbe_sriov.h"
42 #include "ifdi_if.h"
43 
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
46 
47 /************************************************************************
48  * Driver version
49  ************************************************************************/
/* Driver version string exported to iflib via isc_driver_version below. */
char ixgbe_driver_version[] = "4.0.1-k";
51 
52 
53 /************************************************************************
54  * PCI Device ID Table
55  *
56  *   Used by probe to select devices to load on
57  *   Last field stores an index into ixgbe_strings
58  *   Last entry must be all 0s
59  *
60  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61  ************************************************************************/
/*
 * PCI PnP match table consumed by iflib_device_probe() (see
 * IFLIB_PNP_INFO below).  Matching is done on the vendor/device IDs;
 * the trailing string is only the human-readable probe description.
 * NOTE(review): every entry carries the same generic description
 * string — presumably per-device strings were intended; confirm
 * against upstream before changing, as these are user-visible.
 */
static pci_vendor_info_t ixgbe_vendor_info_array[] =
{
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	/* required last entry */
  PVID_END
};
111 
112 static void *ixgbe_register(device_t dev);
113 static int  ixgbe_if_attach_pre(if_ctx_t ctx);
114 static int  ixgbe_if_attach_post(if_ctx_t ctx);
115 static int  ixgbe_if_detach(if_ctx_t ctx);
116 static int  ixgbe_if_shutdown(if_ctx_t ctx);
117 static int  ixgbe_if_suspend(if_ctx_t ctx);
118 static int  ixgbe_if_resume(if_ctx_t ctx);
119 
120 static void ixgbe_if_stop(if_ctx_t ctx);
121 void ixgbe_if_enable_intr(if_ctx_t ctx);
122 static void ixgbe_if_disable_intr(if_ctx_t ctx);
123 static void ixgbe_link_intr_enable(if_ctx_t ctx);
124 static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
125 static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
126 static int  ixgbe_if_media_change(if_ctx_t ctx);
127 static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
128 static int  ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
129 static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
130 static void ixgbe_if_multi_set(if_ctx_t ctx);
131 static int  ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
132 static int  ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
133                                      uint64_t *paddrs, int nrxqs, int nrxqsets);
134 static int  ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
135                                      uint64_t *paddrs, int nrxqs, int nrxqsets);
136 static void ixgbe_if_queues_free(if_ctx_t ctx);
137 static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
138 static void ixgbe_if_update_admin_status(if_ctx_t ctx);
139 static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
140 static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
141 static int  ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
142 int ixgbe_intr(void *arg);
143 
144 /************************************************************************
145  * Function prototypes
146  ************************************************************************/
147 #if __FreeBSD_version >= 1100036
148 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
149 #endif
150 
151 static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
152 static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
153 static void ixgbe_add_device_sysctls(if_ctx_t ctx);
154 static int  ixgbe_allocate_pci_resources(if_ctx_t ctx);
155 static int  ixgbe_setup_low_power_mode(if_ctx_t ctx);
156 
157 static void ixgbe_config_dmac(struct adapter *adapter);
158 static void ixgbe_configure_ivars(struct adapter *adapter);
159 static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
160                            s8 type);
161 static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
162 static bool ixgbe_sfp_probe(if_ctx_t ctx);
163 
164 static void ixgbe_free_pci_resources(if_ctx_t ctx);
165 
166 static int  ixgbe_msix_link(void *arg);
167 static int  ixgbe_msix_que(void *arg);
168 static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
169 static void ixgbe_initialize_receive_units(if_ctx_t ctx);
170 static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
171 
172 static int  ixgbe_setup_interface(if_ctx_t ctx);
173 static void ixgbe_init_device_features(struct adapter *adapter);
174 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
175 static void ixgbe_add_media_types(if_ctx_t ctx);
176 static void ixgbe_update_stats_counters(struct adapter *adapter);
177 static void ixgbe_config_link(if_ctx_t ctx);
178 static void ixgbe_get_slot_info(struct adapter *);
179 static void ixgbe_check_wol_support(struct adapter *adapter);
180 static void ixgbe_enable_rx_drop(struct adapter *);
181 static void ixgbe_disable_rx_drop(struct adapter *);
182 
183 static void ixgbe_add_hw_stats(struct adapter *adapter);
184 static int  ixgbe_set_flowcntl(struct adapter *, int);
185 static int  ixgbe_set_advertise(struct adapter *, int);
186 static int  ixgbe_get_advertise(struct adapter *);
187 static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
188 static void ixgbe_config_gpie(struct adapter *adapter);
189 static void ixgbe_config_delay_values(struct adapter *adapter);
190 
191 /* Sysctl handlers */
192 static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
193 static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
194 static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
195 static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
196 static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
197 static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
198 #ifdef IXGBE_DEBUG
199 static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
200 static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
201 #endif
202 static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
203 static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
204 static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
205 static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
206 static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
207 static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
208 static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
209 
210 /* Deferred interrupt tasklets */
211 static void ixgbe_handle_msf(void *);
212 static void ixgbe_handle_mod(void *);
213 static void ixgbe_handle_phy(void *);
214 
215 /************************************************************************
216  *  FreeBSD Device Interface Entry Points
217  ************************************************************************/
/*
 * newbus method table for the "ix" device: probe/attach/detach and
 * power management are all delegated to iflib, which in turn calls
 * back into the ifdi_* methods registered below.
 */
static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixgbe_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
#ifdef PCI_IOV
	/* SR-IOV virtual function management, also routed through iflib */
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct adapter),
};

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
/* Export the PCI ID table so devmatch(8) can autoload the module */
IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);
245 
/*
 * iflib driver-interface method table: maps the generic ifdi_* entry
 * points onto this driver's ixgbe_if_* implementations.
 */
static device_method_t ixgbe_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
	DEVMETHOD(ifdi_init, ixgbe_if_init),
	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
	/*
	 * NOTE(review): the TX queue intr-enable method is wired to the RX
	 * handler — presumably because TX and RX share MSI-X vectors here;
	 * confirm against ixgbe_if_msix_intr_assign before changing.
	 */
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
#endif /* PCI_IOV */
	DEVMETHOD_END
};
283 
284 /*
285  * TUNEABLE PARAMETERS:
286  */
287 
/* Parent sysctl node: everything below hangs off hw.ix.* */
static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
static driver_t ixgbe_if_driver = {
  "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
};

/*
 * Interrupt throttling ceiling (interrupts/second); RDTUN means it is
 * readable at runtime and settable as a loader tunable at boot.
 */
static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
 * Smart speed setting, default to on
 * this only works as a compile option
 * right now as its during attach, set
 * this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = FALSE;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
#endif

/* malloc(9) type tag used for all allocations made by this driver */
MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool, this means every 20th packet will be probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;
361 
/* TX/RX fast-path method table, defined in the companion if_ix txrx file */
extern struct if_txrx ixgbe_txrx;

/*
 * Shared context template handed to iflib: DMA sizing/alignment limits,
 * descriptor-ring bounds, the PnP table, and the ifdi method table.
 */
static struct if_shared_ctx ixgbe_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = PAGE_SIZE*4,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = PAGE_SIZE*4,
	.isc_nfl = 1,	/* one free-list per RX queue */
	.isc_ntxqs = 1,	/* one TX ring per queue set */
	.isc_nrxqs = 1,	/* one RX ring per queue set */

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixgbe_vendor_info_array,
	.isc_driver_version = ixgbe_driver_version,
	.isc_driver = &ixgbe_if_driver,
	.isc_flags = IFLIB_TSO_INIT_IP,

	/* Descriptor-count limits and defaults (one entry per ring type) */
	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
};

if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;
393 
394 /************************************************************************
395  * ixgbe_if_tx_queues_alloc
396  ************************************************************************/
397 static int
ixgbe_if_tx_queues_alloc(if_ctx_t ctx,caddr_t * vaddrs,uint64_t * paddrs,int ntxqs,int ntxqsets)398 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
399                          int ntxqs, int ntxqsets)
400 {
401 	struct adapter     *adapter = iflib_get_softc(ctx);
402 	if_softc_ctx_t     scctx = adapter->shared;
403 	struct ix_tx_queue *que;
404 	int                i, j, error;
405 
406 	MPASS(adapter->num_tx_queues > 0);
407 	MPASS(adapter->num_tx_queues == ntxqsets);
408 	MPASS(ntxqs == 1);
409 
410 	/* Allocate queue structure memory */
411 	adapter->tx_queues =
412 	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
413 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
414 	if (!adapter->tx_queues) {
415 		device_printf(iflib_get_dev(ctx),
416 		    "Unable to allocate TX ring memory\n");
417 		return (ENOMEM);
418 	}
419 
420 	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
421 		struct tx_ring *txr = &que->txr;
422 
423 		/* In case SR-IOV is enabled, align the index properly */
424 		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
425 		    i);
426 
427 		txr->adapter = que->adapter = adapter;
428 		adapter->active_queues |= (u64)1 << txr->me;
429 
430 		/* Allocate report status array */
431 		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
432 		if (txr->tx_rsq == NULL) {
433 			error = ENOMEM;
434 			goto fail;
435 		}
436 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
437 			txr->tx_rsq[j] = QIDX_INVALID;
438 		/* get the virtual and physical address of the hardware queues */
439 		txr->tail = IXGBE_TDT(txr->me);
440 		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
441 		txr->tx_paddr = paddrs[i];
442 
443 		txr->bytes = 0;
444 		txr->total_packets = 0;
445 
446 		/* Set the rate at which we sample packets */
447 		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
448 			txr->atr_sample = atr_sample_rate;
449 
450 	}
451 
452 	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
453 	    adapter->num_tx_queues);
454 
455 	return (0);
456 
457 fail:
458 	ixgbe_if_queues_free(ctx);
459 
460 	return (error);
461 } /* ixgbe_if_tx_queues_alloc */
462 
463 /************************************************************************
464  * ixgbe_if_rx_queues_alloc
465  ************************************************************************/
466 static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx,caddr_t * vaddrs,uint64_t * paddrs,int nrxqs,int nrxqsets)467 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
468                          int nrxqs, int nrxqsets)
469 {
470 	struct adapter     *adapter = iflib_get_softc(ctx);
471 	struct ix_rx_queue *que;
472 	int                i;
473 
474 	MPASS(adapter->num_rx_queues > 0);
475 	MPASS(adapter->num_rx_queues == nrxqsets);
476 	MPASS(nrxqs == 1);
477 
478 	/* Allocate queue structure memory */
479 	adapter->rx_queues =
480 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
481 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
482 	if (!adapter->rx_queues) {
483 		device_printf(iflib_get_dev(ctx),
484 		    "Unable to allocate TX ring memory\n");
485 		return (ENOMEM);
486 	}
487 
488 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
489 		struct rx_ring *rxr = &que->rxr;
490 
491 		/* In case SR-IOV is enabled, align the index properly */
492 		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
493 		    i);
494 
495 		rxr->adapter = que->adapter = adapter;
496 
497 		/* get the virtual and physical address of the hw queues */
498 		rxr->tail = IXGBE_RDT(rxr->me);
499 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
500 		rxr->rx_paddr = paddrs[i];
501 		rxr->bytes = 0;
502 		rxr->que = que;
503 	}
504 
505 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
506 	    adapter->num_rx_queues);
507 
508 	return (0);
509 } /* ixgbe_if_rx_queues_alloc */
510 
511 /************************************************************************
512  * ixgbe_if_queues_free
513  ************************************************************************/
514 static void
ixgbe_if_queues_free(if_ctx_t ctx)515 ixgbe_if_queues_free(if_ctx_t ctx)
516 {
517 	struct adapter     *adapter = iflib_get_softc(ctx);
518 	struct ix_tx_queue *tx_que = adapter->tx_queues;
519 	struct ix_rx_queue *rx_que = adapter->rx_queues;
520 	int                i;
521 
522 	if (tx_que != NULL) {
523 		for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
524 			struct tx_ring *txr = &tx_que->txr;
525 			if (txr->tx_rsq == NULL)
526 				break;
527 
528 			free(txr->tx_rsq, M_IXGBE);
529 			txr->tx_rsq = NULL;
530 		}
531 
532 		free(adapter->tx_queues, M_IXGBE);
533 		adapter->tx_queues = NULL;
534 	}
535 	if (rx_que != NULL) {
536 		free(adapter->rx_queues, M_IXGBE);
537 		adapter->rx_queues = NULL;
538 	}
539 } /* ixgbe_if_queues_free */
540 
541 /************************************************************************
542  * ixgbe_initialize_rss_mapping
543  ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32             reta = 0, mrqc, rss_key[10];
	int             queue_id, table_size, index_mult;
	int             i, j;
	u32             rss_hash_config;

	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family has a 512-entry indirection table */
		table_size = 512;
		break;
	default:
		break;
	}

	/*
	 * Set up the redirection table.  Each 32-bit RETA register packs
	 * four 8-bit queue indices; entries are shifted in from the top
	 * and the register is written once every fourth iteration.
	 */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j cycles 0..num_rx_queues-1 to spread entries round-robin */
		if (j == adapter->num_rx_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_rx_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
		if ((i & 3) == 3) {
			/* Entries past 128 live in the extended ERETA bank */
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6
		                | RSS_HASHTYPE_RSS_IPV6_EX
		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the stack's hash-type config into MRQC field-enable bits */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
652 
653 /************************************************************************
654  * ixgbe_initialize_receive_units - Setup receive registers and features.
655  ************************************************************************/
/* Rounds an mbuf buffer size up to the next SRRCTL.BSIZEPKT granule */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	if_softc_ctx_t     scctx = adapter->shared;
	struct ixgbe_hw    *hw = &adapter->hw;
	struct ifnet       *ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que;
	int                i, j;
	u32                bufsz, fctrl, srrctl, rxcsum;
	u32                hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598-only filter bits (discard pause / pass MAC control) */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* RX buffer size in SRRCTL.BSIZEPKT units (1KB granules) */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Setup the Base and Length of the Rx Descriptor Ring */
	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64            rdba = rxr->rx_paddr;

		/* j is the hardware ring index (SR-IOV aware, set at alloc) */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_rx_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail =  IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Packet-split header types (not available on 82598) */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		            | IXGBE_PSRTYPE_UDPHDR
		            | IXGBE_PSRTYPE_IPV4HDR
		            | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_rx_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
767 
768 /************************************************************************
769  * ixgbe_initialize_transmit_units - Enable transmit units.
770  ************************************************************************/
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw    *hw = &adapter->hw;
	if_softc_ctx_t     scctx = adapter->shared;
	struct ix_tx_queue *que;
	int i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
	    i++, que++) {
		struct tx_ring	   *txr = &que->txr;
		u64 tdba = txr->tx_paddr;
		u32 txctrl = 0;
		int j = txr->me;

		/* Program the ring's DMA base (split into low/high 32 bits) */
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Reset the software report-status bookkeeping for the ring */
		txr->tx_rs_cidx = txr->tx_rs_pidx;
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		/* Non-82598 MACs require the global TX DMA enable bit */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

} /* ixgbe_initialize_transmit_units */
847 
848 /************************************************************************
849  * ixgbe_register
850  ************************************************************************/
static void *
ixgbe_register(device_t dev)
{
	/*
	 * Hand iflib the driver's shared device-class context descriptor.
	 * The 'dev' argument is unused here.
	 */
	return (ixgbe_sctx);
} /* ixgbe_register */
856 
857 /************************************************************************
858  * ixgbe_if_attach_pre - Device initialization routine, part 1
859  *
860  *   Called when the driver is being loaded.
861  *   Identifies the type of hardware, initializes the hardware,
862  *   and initializes iflib structures.
863  *
864  *   return 0 on success, positive on failure
865  ************************************************************************/
866 static int
ixgbe_if_attach_pre(if_ctx_t ctx)867 ixgbe_if_attach_pre(if_ctx_t ctx)
868 {
869 	struct adapter  *adapter;
870 	device_t        dev;
871 	if_softc_ctx_t  scctx;
872 	struct ixgbe_hw *hw;
873 	int             error = 0;
874 	u32             ctrl_ext;
875 
876 	INIT_DEBUGOUT("ixgbe_attach: begin");
877 
878 	/* Allocate, clear, and link in our adapter structure */
879 	dev = iflib_get_dev(ctx);
880 	adapter = iflib_get_softc(ctx);
881 	adapter->hw.back = adapter;
882 	adapter->ctx = ctx;
883 	adapter->dev = dev;
884 	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
885 	adapter->media = iflib_get_media(ctx);
886 	hw = &adapter->hw;
887 
888 	/* Determine hardware revision */
889 	hw->vendor_id = pci_get_vendor(dev);
890 	hw->device_id = pci_get_device(dev);
891 	hw->revision_id = pci_get_revid(dev);
892 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
893 	hw->subsystem_device_id = pci_get_subdevice(dev);
894 
895 	/* Do base PCI setup - map BAR0 */
896 	if (ixgbe_allocate_pci_resources(ctx)) {
897 		device_printf(dev, "Allocation of PCI resources failed\n");
898 		return (ENXIO);
899 	}
900 
901 	/* let hardware know driver is loaded */
902 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
903 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
904 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
905 
906 	/*
907 	 * Initialize the shared code
908 	 */
909 	if (ixgbe_init_shared_code(hw) != 0) {
910 		device_printf(dev, "Unable to initialize the shared code\n");
911 		error = ENXIO;
912 		goto err_pci;
913 	}
914 
915 	if (hw->mbx.ops.init_params)
916 		hw->mbx.ops.init_params(hw);
917 
918 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
919 
920 	if (hw->mac.type != ixgbe_mac_82598EB)
921 		hw->phy.smart_speed = ixgbe_smart_speed;
922 
923 	ixgbe_init_device_features(adapter);
924 
925 	/* Enable WoL (if supported) */
926 	ixgbe_check_wol_support(adapter);
927 
928 	/* Verify adapter fan is still functional (if applicable) */
929 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
930 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
931 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
932 	}
933 
934 	/* Ensure SW/FW semaphore is free */
935 	ixgbe_init_swfw_semaphore(hw);
936 
937 	/* Set an initial default flow control value */
938 	hw->fc.requested_mode = ixgbe_flow_control;
939 
940 	hw->phy.reset_if_overtemp = TRUE;
941 	error = ixgbe_reset_hw(hw);
942 	hw->phy.reset_if_overtemp = FALSE;
943 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
944 		/*
945 		 * No optics in this port, set up
946 		 * so the timer routine will probe
947 		 * for later insertion.
948 		 */
949 		adapter->sfp_probe = TRUE;
950 		error = 0;
951 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
952 		device_printf(dev, "Unsupported SFP+ module detected!\n");
953 		error = EIO;
954 		goto err_pci;
955 	} else if (error) {
956 		device_printf(dev, "Hardware initialization failed\n");
957 		error = EIO;
958 		goto err_pci;
959 	}
960 
961 	/* Make sure we have a good EEPROM before we read from it */
962 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
963 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
964 		error = EIO;
965 		goto err_pci;
966 	}
967 
968 	error = ixgbe_start_hw(hw);
969 	switch (error) {
970 	case IXGBE_ERR_EEPROM_VERSION:
971 		device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
972 		break;
973 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
974 		device_printf(dev, "Unsupported SFP+ Module\n");
975 		error = EIO;
976 		goto err_pci;
977 	case IXGBE_ERR_SFP_NOT_PRESENT:
978 		device_printf(dev, "No SFP+ Module found\n");
979 		/* falls thru */
980 	default:
981 		break;
982 	}
983 
984 	/* Most of the iflib initialization... */
985 
986 	iflib_set_mac(ctx, hw->mac.addr);
987 	switch (adapter->hw.mac.type) {
988 	case ixgbe_mac_X550:
989 	case ixgbe_mac_X550EM_x:
990 	case ixgbe_mac_X550EM_a:
991 		scctx->isc_rss_table_size = 512;
992 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
993 		break;
994 	default:
995 		scctx->isc_rss_table_size = 128;
996 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
997 	}
998 
999 	/* Allow legacy interrupts */
1000 	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1001 
1002 	scctx->isc_txqsizes[0] =
1003 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1004 	    sizeof(u32), DBA_ALIGN),
1005 	scctx->isc_rxqsizes[0] =
1006 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1007 	    DBA_ALIGN);
1008 
1009 	/* XXX */
1010 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1011 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1012 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1013 		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1014 	} else {
1015 		scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP;
1016 		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1017 	}
1018 
1019 	scctx->isc_msix_bar = pci_msix_table_bar(dev);
1020 
1021 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1022 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1023 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1024 
1025 	scctx->isc_txrx = &ixgbe_txrx;
1026 
1027 	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
1028 
1029 	return (0);
1030 
1031 err_pci:
1032 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1033 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1034 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1035 	ixgbe_free_pci_resources(ctx);
1036 
1037 	return (error);
1038 } /* ixgbe_if_attach_pre */
1039 
1040  /*********************************************************************
1041  * ixgbe_if_attach_post - Device initialization routine, part 2
1042  *
1043  *   Called during driver load, but after interrupts and
1044  *   resources have been allocated and configured.
1045  *   Sets up some data structures not relevant to iflib.
1046  *
1047  *   return 0 on success, positive on failure
1048  *********************************************************************/
static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct adapter  *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;

	dev = iflib_get_dev(ctx);
	adapter = iflib_get_softc(ctx);
	hw = &adapter->hw;


	/* Legacy (INTx) interrupts are only usable when the feature bit is set */
	if (adapter->intr_type == IFLIB_INTR_LEGACY &&
		(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
		device_printf(dev, "Device does not support legacy interrupts");
		error = ENXIO;
		goto err;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	                      MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err;
	}

	/* hw.ix defaults init */
	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);

	/* Enable the optics for 82599 SFP+ fiber */
	ixgbe_enable_tx_laser(hw);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, TRUE);

	ixgbe_initialize_iov(adapter);

	error = ixgbe_setup_interface(ctx);
	if (error) {
		device_printf(dev, "Interface setup failed: %d\n", error);
		goto err;
	}

	/* Pick up the initial link state */
	ixgbe_if_update_admin_status(ctx);

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);
	ixgbe_add_hw_stats(adapter);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(adapter);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass adapter.
	 */
	ixgbe_bypass_init(adapter);

	/* Set an initial dmac value */
	adapter->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	adapter->advertise = ixgbe_get_advertise(adapter);

	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(ctx);

	return (0);
err:
	/* NOTE(review): adapter->mta is not freed here on later failures -
	 * presumably released in the detach path; verify. */
	return (error);
} /* ixgbe_if_attach_post */
1125 
1126 /************************************************************************
1127  * ixgbe_check_wol_support
1128  *
1129  *   Checks whether the adapter's ports are capable of
1130  *   Wake On LAN by reading the adapter's NVM.
1131  *
1132  *   Sets each port's hw->wol_enabled value depending
1133  *   on the value read here.
1134  ************************************************************************/
1135 static void
ixgbe_check_wol_support(struct adapter * adapter)1136 ixgbe_check_wol_support(struct adapter *adapter)
1137 {
1138 	struct ixgbe_hw *hw = &adapter->hw;
1139 	u16             dev_caps = 0;
1140 
1141 	/* Find out WoL support for port */
1142 	adapter->wol_support = hw->wol_enabled = 0;
1143 	ixgbe_get_device_caps(hw, &dev_caps);
1144 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1145 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1146 	     hw->bus.func == 0))
1147 		adapter->wol_support = hw->wol_enabled = 1;
1148 
1149 	/* Save initial wake up filter configuration */
1150 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1151 
1152 	return;
1153 } /* ixgbe_check_wol_support */
1154 
1155 /************************************************************************
1156  * ixgbe_setup_interface
1157  *
1158  *   Setup networking device structure and register an interface.
1159  ************************************************************************/
1160 static int
ixgbe_setup_interface(if_ctx_t ctx)1161 ixgbe_setup_interface(if_ctx_t ctx)
1162 {
1163 	struct ifnet   *ifp = iflib_get_ifp(ctx);
1164 	struct adapter *adapter = iflib_get_softc(ctx);
1165 
1166 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1167 
1168 	if_setbaudrate(ifp, IF_Gbps(10));
1169 
1170 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1171 
1172 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1173 
1174 	ixgbe_add_media_types(ctx);
1175 
1176 	/* Autoselect media by default */
1177 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1178 
1179 	return (0);
1180 } /* ixgbe_setup_interface */
1181 
1182 /************************************************************************
1183  * ixgbe_if_get_counter
1184  ************************************************************************/
1185 static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx,ift_counter cnt)1186 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1187 {
1188 	struct adapter *adapter = iflib_get_softc(ctx);
1189 	if_t           ifp = iflib_get_ifp(ctx);
1190 
1191 	switch (cnt) {
1192 	case IFCOUNTER_IPACKETS:
1193 		return (adapter->ipackets);
1194 	case IFCOUNTER_OPACKETS:
1195 		return (adapter->opackets);
1196 	case IFCOUNTER_IBYTES:
1197 		return (adapter->ibytes);
1198 	case IFCOUNTER_OBYTES:
1199 		return (adapter->obytes);
1200 	case IFCOUNTER_IMCASTS:
1201 		return (adapter->imcasts);
1202 	case IFCOUNTER_OMCASTS:
1203 		return (adapter->omcasts);
1204 	case IFCOUNTER_COLLISIONS:
1205 		return (0);
1206 	case IFCOUNTER_IQDROPS:
1207 		return (adapter->iqdrops);
1208 	case IFCOUNTER_OQDROPS:
1209 		return (0);
1210 	case IFCOUNTER_IERRORS:
1211 		return (adapter->ierrors);
1212 	default:
1213 		return (if_get_counter_default(ifp, cnt));
1214 	}
1215 } /* ixgbe_if_get_counter */
1216 
1217 /************************************************************************
1218  * ixgbe_if_i2c_req
1219  ************************************************************************/
1220 static int
ixgbe_if_i2c_req(if_ctx_t ctx,struct ifi2creq * req)1221 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1222 {
1223 	struct adapter		*adapter = iflib_get_softc(ctx);
1224 	struct ixgbe_hw 	*hw = &adapter->hw;
1225 	int 			i;
1226 
1227 
1228 	if (hw->phy.ops.read_i2c_byte == NULL)
1229 		return (ENXIO);
1230 	for (i = 0; i < req->len; i++)
1231 		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1232 		    req->dev_addr, &req->data[i]);
1233 	return (0);
1234 } /* ixgbe_if_i2c_req */
1235 
1236 /************************************************************************
1237  * ixgbe_add_media_types
1238  ************************************************************************/
1239 static void
ixgbe_add_media_types(if_ctx_t ctx)1240 ixgbe_add_media_types(if_ctx_t ctx)
1241 {
1242 	struct adapter  *adapter = iflib_get_softc(ctx);
1243 	struct ixgbe_hw *hw = &adapter->hw;
1244 	device_t        dev = iflib_get_dev(ctx);
1245 	u64             layer;
1246 
1247 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
1248 
1249 	/* Media types with matching FreeBSD media defines */
1250 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1251 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1252 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1253 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1254 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1255 		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1256 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1257 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1258 
1259 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1260 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1261 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1262 		    NULL);
1263 
1264 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1265 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1266 		if (hw->phy.multispeed_fiber)
1267 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1268 			    NULL);
1269 	}
1270 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1271 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1272 		if (hw->phy.multispeed_fiber)
1273 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1274 			    NULL);
1275 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1276 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1277 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1278 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1279 
1280 #ifdef IFM_ETH_XTYPE
1281 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1282 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1283 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1284 		ifmedia_add( adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1285 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1286 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1287 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1288 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1289 #else
1290 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1291 		device_printf(dev, "Media supported: 10GbaseKR\n");
1292 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1293 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1294 	}
1295 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1296 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1297 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1298 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1299 	}
1300 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1301 		device_printf(dev, "Media supported: 1000baseKX\n");
1302 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1303 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1304 	}
1305 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1306 		device_printf(dev, "Media supported: 2500baseKX\n");
1307 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1308 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1309 	}
1310 #endif
1311 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1312 		device_printf(dev, "Media supported: 1000baseBX\n");
1313 
1314 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1315 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1316 		    0, NULL);
1317 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1318 	}
1319 
1320 	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1321 } /* ixgbe_add_media_types */
1322 
1323 /************************************************************************
1324  * ixgbe_is_sfp
1325  ************************************************************************/
1326 static inline bool
ixgbe_is_sfp(struct ixgbe_hw * hw)1327 ixgbe_is_sfp(struct ixgbe_hw *hw)
1328 {
1329 	switch (hw->mac.type) {
1330 	case ixgbe_mac_82598EB:
1331 		if (hw->phy.type == ixgbe_phy_nl)
1332 			return (TRUE);
1333 		return (FALSE);
1334 	case ixgbe_mac_82599EB:
1335 		switch (hw->mac.ops.get_media_type(hw)) {
1336 		case ixgbe_media_type_fiber:
1337 		case ixgbe_media_type_fiber_qsfp:
1338 			return (TRUE);
1339 		default:
1340 			return (FALSE);
1341 		}
1342 	case ixgbe_mac_X550EM_x:
1343 	case ixgbe_mac_X550EM_a:
1344 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1345 			return (TRUE);
1346 		return (FALSE);
1347 	default:
1348 		return (FALSE);
1349 	}
1350 } /* ixgbe_is_sfp */
1351 
1352 /************************************************************************
1353  * ixgbe_config_link
1354  ************************************************************************/
1355 static void
ixgbe_config_link(if_ctx_t ctx)1356 ixgbe_config_link(if_ctx_t ctx)
1357 {
1358 	struct adapter  *adapter = iflib_get_softc(ctx);
1359 	struct ixgbe_hw *hw = &adapter->hw;
1360 	u32             autoneg, err = 0;
1361 	bool            sfp, negotiate;
1362 
1363 	sfp = ixgbe_is_sfp(hw);
1364 
1365 	if (sfp) {
1366 		adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
1367 		iflib_admin_intr_deferred(ctx);
1368 	} else {
1369 		if (hw->mac.ops.check_link)
1370 			err = ixgbe_check_link(hw, &adapter->link_speed,
1371 			    &adapter->link_up, FALSE);
1372 		if (err)
1373 			return;
1374 		autoneg = hw->phy.autoneg_advertised;
1375 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1376 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1377 			    &negotiate);
1378 		if (err)
1379 			return;
1380 		if (hw->mac.ops.setup_link)
1381 			err = hw->mac.ops.setup_link(hw, autoneg,
1382 			    adapter->link_up);
1383 	}
1384 } /* ixgbe_config_link */
1385 
1386 /************************************************************************
1387  * ixgbe_update_stats_counters - Update board statistics counters.
1388  ************************************************************************/
1389 static void
ixgbe_update_stats_counters(struct adapter * adapter)1390 ixgbe_update_stats_counters(struct adapter *adapter)
1391 {
1392 	struct ixgbe_hw       *hw = &adapter->hw;
1393 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1394 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1395 	u64                   total_missed_rx = 0;
1396 
1397 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1398 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1399 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1400 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1401 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1402 
1403 	for (int i = 0; i < 16; i++) {
1404 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1405 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1406 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1407 	}
1408 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1409 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1410 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1411 
1412 	/* Hardware workaround, gprc counts missed packets */
1413 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1414 	stats->gprc -= missed_rx;
1415 
1416 	if (hw->mac.type != ixgbe_mac_82598EB) {
1417 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1418 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1419 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1420 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1421 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1422 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1423 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1424 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1425 	} else {
1426 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1427 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1428 		/* 82598 only has a counter in the high register */
1429 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1430 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1431 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1432 	}
1433 
1434 	/*
1435 	 * Workaround: mprc hardware is incorrectly counting
1436 	 * broadcasts, so for now we subtract those.
1437 	 */
1438 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1439 	stats->bprc += bprc;
1440 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1441 	if (hw->mac.type == ixgbe_mac_82598EB)
1442 		stats->mprc -= bprc;
1443 
1444 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1445 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1446 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1447 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1448 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1449 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1450 
1451 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1452 	stats->lxontxc += lxon;
1453 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1454 	stats->lxofftxc += lxoff;
1455 	total = lxon + lxoff;
1456 
1457 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1458 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1459 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1460 	stats->gptc -= total;
1461 	stats->mptc -= total;
1462 	stats->ptc64 -= total;
1463 	stats->gotc -= total * ETHER_MIN_LEN;
1464 
1465 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1466 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1467 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1468 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1469 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1470 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1471 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1472 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1473 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1474 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1475 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1476 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1477 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1478 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1479 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1480 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1481 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1482 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1483 	/* Only read FCOE on 82599 */
1484 	if (hw->mac.type != ixgbe_mac_82598EB) {
1485 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1486 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1487 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1488 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1489 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1490 	}
1491 
1492 	/* Fill out the OS statistics structure */
1493 	IXGBE_SET_IPACKETS(adapter, stats->gprc);
1494 	IXGBE_SET_OPACKETS(adapter, stats->gptc);
1495 	IXGBE_SET_IBYTES(adapter, stats->gorc);
1496 	IXGBE_SET_OBYTES(adapter, stats->gotc);
1497 	IXGBE_SET_IMCASTS(adapter, stats->mprc);
1498 	IXGBE_SET_OMCASTS(adapter, stats->mptc);
1499 	IXGBE_SET_COLLISIONS(adapter, 0);
1500 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1501 	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1502 } /* ixgbe_update_stats_counters */
1503 
1504 /************************************************************************
1505  * ixgbe_add_hw_stats
1506  *
1507  *   Add sysctl variables, one per statistic, to the system.
1508  ************************************************************************/
1509 static void
ixgbe_add_hw_stats(struct adapter * adapter)1510 ixgbe_add_hw_stats(struct adapter *adapter)
1511 {
1512 	device_t               dev = iflib_get_dev(adapter->ctx);
1513 	struct ix_rx_queue     *rx_que;
1514 	struct ix_tx_queue     *tx_que;
1515 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1516 	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1517 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1518 	struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1519 	struct sysctl_oid      *stat_node, *queue_node;
1520 	struct sysctl_oid_list *stat_list, *queue_list;
1521 	int                    i;
1522 
1523 #define QUEUE_NAME_LEN 32
1524 	char                   namebuf[QUEUE_NAME_LEN];
1525 
1526 	/* Driver Statistics */
1527 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1528 	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1529 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1530 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1531 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1532 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1533 
1534 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1535 		struct tx_ring *txr = &tx_que->txr;
1536 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1537 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1538 		    CTLFLAG_RD, NULL, "Queue Name");
1539 		queue_list = SYSCTL_CHILDREN(queue_node);
1540 
1541 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1542 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1543 		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1544 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1545 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1546 		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1547 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1548 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1549 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1550 		    CTLFLAG_RD, &txr->total_packets,
1551 		    "Queue Packets Transmitted");
1552 	}
1553 
1554 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1555 		struct rx_ring *rxr = &rx_que->rxr;
1556 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1557 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1558 		    CTLFLAG_RD, NULL, "Queue Name");
1559 		queue_list = SYSCTL_CHILDREN(queue_node);
1560 
1561 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1562 		    CTLTYPE_UINT | CTLFLAG_RW, &adapter->rx_queues[i],
1563 		    sizeof(&adapter->rx_queues[i]),
1564 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1565 		    "Interrupt Rate");
1566 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1567 		    CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1568 		    "irqs on this queue");
1569 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1570 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1571 		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1572 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1573 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1574 		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1575 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1576 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1577 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1578 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1579 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1580 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1581 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1582 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1583 	}
1584 
1585 	/* MAC stats get their own sub node */
1586 
1587 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1588 	    CTLFLAG_RD, NULL, "MAC Statistics");
1589 	stat_list = SYSCTL_CHILDREN(stat_node);
1590 
1591 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1592 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1593 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1594 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1595 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1596 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1597 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1598 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1599 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1600 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1601 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1602 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1603 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1604 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1605 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1606 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1607 
1608 	/* Flow Control stats */
1609 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1610 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1611 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1612 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1613 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1614 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1615 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1616 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1617 
1618 	/* Packet Reception Stats */
1619 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1620 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1621 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1622 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1623 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1624 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1625 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1626 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1627 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1628 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1629 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1630 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1631 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1632 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1633 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1634 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1635 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1636 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1637 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1638 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1639 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1640 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1641 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1642 	    CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
1643 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1644 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1645 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1646 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1647 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1648 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1649 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1650 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1651 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1652 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1653 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1654 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
1655 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1656 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1657 
1658 	/* Packet Transmission Stats */
1659 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1660 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1661 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1662 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1663 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1664 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1665 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1666 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1667 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1668 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1669 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1670 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1671 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1672 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1673 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1674 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1675 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1676 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1677 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1678 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1679 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1680 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1681 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1682 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1683 } /* ixgbe_add_hw_stats */
1684 
1685 /************************************************************************
1686  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1687  *
1688  *   Retrieves the TDH value from the hardware
1689  ************************************************************************/
1690 static int
ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)1691 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1692 {
1693 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1694 	int            error;
1695 	unsigned int   val;
1696 
1697 	if (!txr)
1698 		return (0);
1699 
1700 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1701 	error = sysctl_handle_int(oidp, &val, 0, req);
1702 	if (error || !req->newptr)
1703 		return error;
1704 
1705 	return (0);
1706 } /* ixgbe_sysctl_tdh_handler */
1707 
1708 /************************************************************************
1709  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1710  *
1711  *   Retrieves the TDT value from the hardware
1712  ************************************************************************/
1713 static int
ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)1714 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1715 {
1716 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1717 	int            error;
1718 	unsigned int   val;
1719 
1720 	if (!txr)
1721 		return (0);
1722 
1723 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1724 	error = sysctl_handle_int(oidp, &val, 0, req);
1725 	if (error || !req->newptr)
1726 		return error;
1727 
1728 	return (0);
1729 } /* ixgbe_sysctl_tdt_handler */
1730 
1731 /************************************************************************
1732  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1733  *
1734  *   Retrieves the RDH value from the hardware
1735  ************************************************************************/
1736 static int
ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)1737 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1738 {
1739 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1740 	int            error;
1741 	unsigned int   val;
1742 
1743 	if (!rxr)
1744 		return (0);
1745 
1746 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1747 	error = sysctl_handle_int(oidp, &val, 0, req);
1748 	if (error || !req->newptr)
1749 		return error;
1750 
1751 	return (0);
1752 } /* ixgbe_sysctl_rdh_handler */
1753 
1754 /************************************************************************
1755  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1756  *
1757  *   Retrieves the RDT value from the hardware
1758  ************************************************************************/
1759 static int
ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)1760 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1761 {
1762 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1763 	int            error;
1764 	unsigned int   val;
1765 
1766 	if (!rxr)
1767 		return (0);
1768 
1769 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1770 	error = sysctl_handle_int(oidp, &val, 0, req);
1771 	if (error || !req->newptr)
1772 		return error;
1773 
1774 	return (0);
1775 } /* ixgbe_sysctl_rdt_handler */
1776 
1777 /************************************************************************
1778  * ixgbe_if_vlan_register
1779  *
1780  *   Run via vlan config EVENT, it enables us to use the
1781  *   HW Filter table since we can get the vlan id. This
1782  *   just creates the entry in the soft version of the
1783  *   VFTA, init will repopulate the real table.
1784  ************************************************************************/
1785 static void
ixgbe_if_vlan_register(if_ctx_t ctx,u16 vtag)1786 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1787 {
1788 	struct adapter *adapter = iflib_get_softc(ctx);
1789 	u16            index, bit;
1790 
1791 	index = (vtag >> 5) & 0x7F;
1792 	bit = vtag & 0x1F;
1793 	adapter->shadow_vfta[index] |= (1 << bit);
1794 	++adapter->num_vlans;
1795 	ixgbe_setup_vlan_hw_support(ctx);
1796 } /* ixgbe_if_vlan_register */
1797 
1798 /************************************************************************
1799  * ixgbe_if_vlan_unregister
1800  *
1801  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1802  ************************************************************************/
1803 static void
ixgbe_if_vlan_unregister(if_ctx_t ctx,u16 vtag)1804 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1805 {
1806 	struct adapter *adapter = iflib_get_softc(ctx);
1807 	u16            index, bit;
1808 
1809 	index = (vtag >> 5) & 0x7F;
1810 	bit = vtag & 0x1F;
1811 	adapter->shadow_vfta[index] &= ~(1 << bit);
1812 	--adapter->num_vlans;
1813 	/* Re-init to load the changes */
1814 	ixgbe_setup_vlan_hw_support(ctx);
1815 } /* ixgbe_if_vlan_unregister */
1816 
1817 /************************************************************************
1818  * ixgbe_setup_vlan_hw_support
1819  ************************************************************************/
1820 static void
ixgbe_setup_vlan_hw_support(if_ctx_t ctx)1821 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1822 {
1823 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1824 	struct adapter  *adapter = iflib_get_softc(ctx);
1825 	struct ixgbe_hw *hw = &adapter->hw;
1826 	struct rx_ring  *rxr;
1827 	int             i;
1828 	u32             ctrl;
1829 
1830 
1831 	/*
1832 	 * We get here thru init_locked, meaning
1833 	 * a soft reset, this has already cleared
1834 	 * the VFTA and other state, so if there
1835 	 * have been no vlan's registered do nothing.
1836 	 */
1837 	if (adapter->num_vlans == 0)
1838 		return;
1839 
1840 	/* Setup the queues for vlans */
1841 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1842 		for (i = 0; i < adapter->num_rx_queues; i++) {
1843 			rxr = &adapter->rx_queues[i].rxr;
1844 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
1845 			if (hw->mac.type != ixgbe_mac_82598EB) {
1846 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1847 				ctrl |= IXGBE_RXDCTL_VME;
1848 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1849 			}
1850 			rxr->vtag_strip = TRUE;
1851 		}
1852 	}
1853 
1854 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1855 		return;
1856 	/*
1857 	 * A soft reset zero's out the VFTA, so
1858 	 * we need to repopulate it now.
1859 	 */
1860 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1861 		if (adapter->shadow_vfta[i] != 0)
1862 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1863 			    adapter->shadow_vfta[i]);
1864 
1865 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1866 	/* Enable the Filter Table if enabled */
1867 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1868 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1869 		ctrl |= IXGBE_VLNCTRL_VFE;
1870 	}
1871 	if (hw->mac.type == ixgbe_mac_82598EB)
1872 		ctrl |= IXGBE_VLNCTRL_VME;
1873 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1874 } /* ixgbe_setup_vlan_hw_support */
1875 
1876 /************************************************************************
1877  * ixgbe_get_slot_info
1878  *
1879  *   Get the width and transaction speed of
1880  *   the slot this adapter is plugged into.
1881  ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t        dev = iflib_get_dev(adapter->ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	int             bus_info_valid = TRUE;	/* cleared if PCIe caps can't be read */
	u32             offset;
	u16             link;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	/* First hop: past the internal bridge (two levels: device -> pcib). */
	dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	/* Second hop: up to the physical expansion-slot bridge. */
	dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register */
	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
	/* Decode the link status word into hw->bus.speed / hw->bus.width. */
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
	     "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
	     "Unknown"));

	/* Warn when the slot can't supply the bandwidth the card can use. */
	if (bus_info_valid) {
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		/* The quad-port SFP part needs a full Gen3 x8 slot. */
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
1973 
1974 /************************************************************************
1975  * ixgbe_if_msix_intr_assign
1976  *
1977  *   Setup MSI-X Interrupt resources and handlers
1978  ************************************************************************/
1979 static int
ixgbe_if_msix_intr_assign(if_ctx_t ctx,int msix)1980 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
1981 {
1982 	struct adapter     *adapter = iflib_get_softc(ctx);
1983 	struct ix_rx_queue *rx_que = adapter->rx_queues;
1984 	struct ix_tx_queue *tx_que;
1985 	int                error, rid, vector = 0;
1986 	int                cpu_id = 0;
1987 	char               buf[16];
1988 
1989 	/* Admin Que is vector 0*/
1990 	rid = vector + 1;
1991 	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
1992 		rid = vector + 1;
1993 
1994 		snprintf(buf, sizeof(buf), "rxq%d", i);
1995 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
1996 		    IFLIB_INTR_RX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
1997 
1998 		if (error) {
1999 			device_printf(iflib_get_dev(ctx),
2000 			    "Failed to allocate que int %d err: %d", i, error);
2001 			adapter->num_rx_queues = i + 1;
2002 			goto fail;
2003 		}
2004 
2005 		rx_que->msix = vector;
2006 		adapter->active_queues |= (u64)(1 << rx_que->msix);
2007 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2008 			/*
2009 			 * The queue ID is used as the RSS layer bucket ID.
2010 			 * We look up the queue ID -> RSS CPU ID and select
2011 			 * that.
2012 			 */
2013 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
2014 		} else {
2015 			/*
2016 			 * Bind the MSI-X vector, and thus the
2017 			 * rings to the corresponding cpu.
2018 			 *
2019 			 * This just happens to match the default RSS
2020 			 * round-robin bucket -> queue -> CPU allocation.
2021 			 */
2022 			if (adapter->num_rx_queues > 1)
2023 				cpu_id = i;
2024 		}
2025 
2026 	}
2027 	for (int i = 0; i < adapter->num_tx_queues; i++) {
2028 		snprintf(buf, sizeof(buf), "txq%d", i);
2029 		tx_que = &adapter->tx_queues[i];
2030 		tx_que->msix = i % adapter->num_rx_queues;
2031 		iflib_softirq_alloc_generic(ctx,
2032 		    &adapter->rx_queues[tx_que->msix].que_irq,
2033 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2034 	}
2035 	rid = vector + 1;
2036 	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
2037 	    IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
2038 	if (error) {
2039 		device_printf(iflib_get_dev(ctx),
2040 		    "Failed to register admin handler");
2041 		return (error);
2042 	}
2043 
2044 	adapter->vector = vector;
2045 
2046 	return (0);
2047 fail:
2048 	iflib_irq_free(ctx, &adapter->irq);
2049 	rx_que = adapter->rx_queues;
2050 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
2051 		iflib_irq_free(ctx, &rx_que->que_irq);
2052 
2053 	return (error);
2054 } /* ixgbe_if_msix_intr_assign */
2055 
2056 /*********************************************************************
2057  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2058  **********************************************************************/
2059 static int
ixgbe_msix_que(void * arg)2060 ixgbe_msix_que(void *arg)
2061 {
2062 	struct ix_rx_queue *que = arg;
2063 	struct adapter     *adapter = que->adapter;
2064 	struct ifnet       *ifp = iflib_get_ifp(que->adapter->ctx);
2065 
2066 	/* Protect against spurious interrupts */
2067 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2068 		return (FILTER_HANDLED);
2069 
2070 	ixgbe_disable_queue(adapter, que->msix);
2071 	++que->irqs;
2072 
2073 	return (FILTER_SCHEDULE_THREAD);
2074 } /* ixgbe_msix_que */
2075 
2076 /************************************************************************
 * ixgbe_if_media_status - Media Ioctl callback
2078  *
2079  *   Called whenever the user queries the status of
2080  *   the interface using ifconfig.
2081  ************************************************************************/
static void
ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
{
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	int             layer;

	INIT_DEBUGOUT("ixgbe_if_media_status: begin");

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report only "valid but inactive". */
	if (!adapter->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Copper (BASE-T/TX) layers: map link speed to twisted-pair media. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* SFP+ direct-attach copper (twinax). */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Long-reach single-mode fiber. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Long-reach multimode fiber. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Short-reach multimode fiber. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	/* CX4 copper. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	/* Backplane (KR/KX4/KX): older ifmedia trees lack the KR/KX types. */
#ifndef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
#else
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
#endif

	/* If nothing is recognized... */
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
} /* ixgbe_if_media_status */
2231 
2232 /************************************************************************
2233  * ixgbe_media_change - Media Ioctl callback
2234  *
2235  *   Called when the user changes speed/duplex using
 *   media/mediaopt option with ifconfig.
2237  ************************************************************************/
static int
ixgbe_if_media_change(if_ctx_t ctx)
{
	struct adapter   *adapter = iflib_get_softc(ctx);
	struct ifmedia   *ifm = iflib_get_media(ctx);
	struct ixgbe_hw  *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_if_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane links are fixed; refuse user media changes. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	/* Translate the requested ifmedia subtype into a link-speed mask. */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
	case IFM_10G_T:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_10G_LRM:
	case IFM_10G_LR:
#ifndef IFM_ETH_XTYPE
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
#else
	case IFM_10G_KR:
	case IFM_10G_KX4:
#endif
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
#ifndef IFM_ETH_XTYPE
	case IFM_1000_CX: /* KX */
#else
	case IFM_1000_KX:
#endif
	case IFM_1000_LX:
	case IFM_1000_SX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_1000_T:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_10G_TWINAX:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/* Mirror the result as the sysctl-style bitmask: 1=100M, 2=1G, 4=10G, 8=10M. */
	adapter->advertise =
	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);

	return (0);

invalid:
	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_if_media_change */
2319 
2320 /************************************************************************
 * ixgbe_if_promisc_set
2322  ************************************************************************/
2323 static int
ixgbe_if_promisc_set(if_ctx_t ctx,int flags)2324 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2325 {
2326 	struct adapter *adapter = iflib_get_softc(ctx);
2327 	struct ifnet   *ifp = iflib_get_ifp(ctx);
2328 	u32            rctl;
2329 	int            mcnt = 0;
2330 
2331 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2332 	rctl &= (~IXGBE_FCTRL_UPE);
2333 	if (ifp->if_flags & IFF_ALLMULTI)
2334 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2335 	else {
2336 		mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES);
2337 	}
2338 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2339 		rctl &= (~IXGBE_FCTRL_MPE);
2340 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2341 
2342 	if (ifp->if_flags & IFF_PROMISC) {
2343 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2344 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2345 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2346 		rctl |= IXGBE_FCTRL_MPE;
2347 		rctl &= ~IXGBE_FCTRL_UPE;
2348 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2349 	}
2350 	return (0);
2351 } /* ixgbe_if_promisc_set */
2352 
2353 /************************************************************************
2354  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2355  ************************************************************************/
2356 static int
ixgbe_msix_link(void * arg)2357 ixgbe_msix_link(void *arg)
2358 {
2359 	struct adapter  *adapter = arg;
2360 	struct ixgbe_hw *hw = &adapter->hw;
2361 	u32             eicr, eicr_mask;
2362 	s32             retval;
2363 
2364 	++adapter->link_irq;
2365 
2366 	/* Pause other interrupts */
2367 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2368 
2369 	/* First get the cause */
2370 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2371 	/* Be sure the queue bits are not cleared */
2372 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
2373 	/* Clear interrupt with write */
2374 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2375 
2376 	/* Link status change */
2377 	if (eicr & IXGBE_EICR_LSC) {
2378 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2379 		adapter->task_requests |= IXGBE_REQUEST_TASK_LSC;
2380 	}
2381 
2382 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2383 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2384 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
2385 			/* This is probably overkill :) */
2386 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2387 				return (FILTER_HANDLED);
2388 			/* Disable the interrupt */
2389 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2390 			adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2391 		} else
2392 			if (eicr & IXGBE_EICR_ECC) {
2393 				device_printf(iflib_get_dev(adapter->ctx),
2394 				   "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2395 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2396 			}
2397 
2398 		/* Check for over temp condition */
2399 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2400 			switch (adapter->hw.mac.type) {
2401 			case ixgbe_mac_X550EM_a:
2402 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2403 					break;
2404 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2405 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2406 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
2407 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2408 				retval = hw->phy.ops.check_overtemp(hw);
2409 				if (retval != IXGBE_ERR_OVERTEMP)
2410 					break;
2411 				device_printf(iflib_get_dev(adapter->ctx),
2412 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2413 				device_printf(iflib_get_dev(adapter->ctx),
2414 				    "System shutdown required!\n");
2415 				break;
2416 			default:
2417 				if (!(eicr & IXGBE_EICR_TS))
2418 					break;
2419 				retval = hw->phy.ops.check_overtemp(hw);
2420 				if (retval != IXGBE_ERR_OVERTEMP)
2421 					break;
2422 				device_printf(iflib_get_dev(adapter->ctx),
2423 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2424 				device_printf(iflib_get_dev(adapter->ctx),
2425 				    "System shutdown required!\n");
2426 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2427 				break;
2428 			}
2429 		}
2430 
2431 		/* Check for VF message */
2432 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2433 		    (eicr & IXGBE_EICR_MAILBOX))
2434 			adapter->task_requests |= IXGBE_REQUEST_TASK_MBX;
2435 	}
2436 
2437 	if (ixgbe_is_sfp(hw)) {
2438 		/* Pluggable optics-related interrupt */
2439 		if (hw->mac.type >= ixgbe_mac_X540)
2440 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2441 		else
2442 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2443 
2444 		if (eicr & eicr_mask) {
2445 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2446 			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
2447 		}
2448 
2449 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
2450 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2451 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
2452 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2453 			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
2454 		}
2455 	}
2456 
2457 	/* Check for fan failure */
2458 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2459 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
2460 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2461 	}
2462 
2463 	/* External PHY interrupt */
2464 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2465 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2466 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2467 		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
2468 	}
2469 
2470 	return (adapter->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2471 } /* ixgbe_msix_link */
2472 
2473 /************************************************************************
2474  * ixgbe_sysctl_interrupt_rate_handler
2475  ************************************************************************/
2476 static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)2477 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2478 {
2479 	struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2480 	int                error;
2481 	unsigned int       reg, usec, rate;
2482 
2483 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2484 	usec = ((reg & 0x0FF8) >> 3);
2485 	if (usec > 0)
2486 		rate = 500000 / usec;
2487 	else
2488 		rate = 0;
2489 	error = sysctl_handle_int(oidp, &rate, 0, req);
2490 	if (error || !req->newptr)
2491 		return error;
2492 	reg &= ~0xfff; /* default, no limitation */
2493 	ixgbe_max_interrupt_rate = 0;
2494 	if (rate > 0 && rate < 500000) {
2495 		if (rate < 1000)
2496 			rate = 1000;
2497 		ixgbe_max_interrupt_rate = rate;
2498 		reg |= ((4000000/rate) & 0xff8);
2499 	}
2500 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2501 
2502 	return (0);
2503 } /* ixgbe_sysctl_interrupt_rate_handler */
2504 
2505 /************************************************************************
2506  * ixgbe_add_device_sysctls
2507  ************************************************************************/
2508 static void
ixgbe_add_device_sysctls(if_ctx_t ctx)2509 ixgbe_add_device_sysctls(if_ctx_t ctx)
2510 {
2511 	struct adapter         *adapter = iflib_get_softc(ctx);
2512 	device_t               dev = iflib_get_dev(ctx);
2513 	struct ixgbe_hw        *hw = &adapter->hw;
2514 	struct sysctl_oid_list *child;
2515 	struct sysctl_ctx_list *ctx_list;
2516 
2517 	ctx_list = device_get_sysctl_ctx(dev);
2518 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2519 
2520 	/* Sysctls for all devices */
2521 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2522 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_flowcntl, "I",
2523 	    IXGBE_SYSCTL_DESC_SET_FC);
2524 
2525 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2526 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
2527 	    IXGBE_SYSCTL_DESC_ADV_SPEED);
2528 
2529 #ifdef IXGBE_DEBUG
2530 	/* testing sysctls (for all devices) */
2531 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2532 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
2533 	    "I", "PCI Power State");
2534 
2535 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2536 	    CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
2537 	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2538 #endif
2539 	/* for X550 series devices */
2540 	if (hw->mac.type >= ixgbe_mac_X550)
2541 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2542 		    CTLTYPE_U16 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
2543 		    "I", "DMA Coalesce");
2544 
2545 	/* for WoL-capable devices */
2546 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2547 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2548 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2549 		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2550 
2551 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2552 		    CTLTYPE_U32 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
2553 		    "I", "Enable/Disable Wake Up Filters");
2554 	}
2555 
2556 	/* for X552/X557-AT devices */
2557 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2558 		struct sysctl_oid *phy_node;
2559 		struct sysctl_oid_list *phy_list;
2560 
2561 		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2562 		    CTLFLAG_RD, NULL, "External PHY sysctls");
2563 		phy_list = SYSCTL_CHILDREN(phy_node);
2564 
2565 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2566 		    CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
2567 		    "I", "Current External PHY Temperature (Celsius)");
2568 
2569 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2570 		    "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD, adapter, 0,
2571 		    ixgbe_sysctl_phy_overtemp_occurred, "I",
2572 		    "External PHY High Temperature Event Occurred");
2573 	}
2574 
2575 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2576 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2577 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2578 		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2579 	}
2580 } /* ixgbe_add_device_sysctls */
2581 
2582 /************************************************************************
2583  * ixgbe_allocate_pci_resources
2584  ************************************************************************/
2585 static int
ixgbe_allocate_pci_resources(if_ctx_t ctx)2586 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2587 {
2588 	struct adapter *adapter = iflib_get_softc(ctx);
2589 	device_t        dev = iflib_get_dev(ctx);
2590 	int             rid;
2591 
2592 	rid = PCIR_BAR(0);
2593 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2594 	    RF_ACTIVE);
2595 
2596 	if (!(adapter->pci_mem)) {
2597 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2598 		return (ENXIO);
2599 	}
2600 
2601 	/* Save bus_space values for READ/WRITE_REG macros */
2602 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2603 	adapter->osdep.mem_bus_space_handle =
2604 	    rman_get_bushandle(adapter->pci_mem);
2605 	/* Set hw values for shared code */
2606 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2607 
2608 	return (0);
2609 } /* ixgbe_allocate_pci_resources */
2610 
2611 /************************************************************************
2612  * ixgbe_detach - Device removal routine
2613  *
2614  *   Called when the driver is being removed.
2615  *   Stops the adapter and deallocates all the resources
2616  *   that were allocated for driver operation.
2617  *
2618  *   return 0 on success, positive on failure
2619  ************************************************************************/
2620 static int
ixgbe_if_detach(if_ctx_t ctx)2621 ixgbe_if_detach(if_ctx_t ctx)
2622 {
2623 	struct adapter *adapter = iflib_get_softc(ctx);
2624 	device_t       dev = iflib_get_dev(ctx);
2625 	u32            ctrl_ext;
2626 
2627 	INIT_DEBUGOUT("ixgbe_detach: begin");
2628 
2629 	if (ixgbe_pci_iov_detach(dev) != 0) {
2630 		device_printf(dev, "SR-IOV in use; detach first.\n");
2631 		return (EBUSY);
2632 	}
2633 
2634 	ixgbe_setup_low_power_mode(ctx);
2635 
2636 	/* let hardware know driver is unloading */
2637 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2638 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2639 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2640 
2641 	ixgbe_free_pci_resources(ctx);
2642 	free(adapter->mta, M_IXGBE);
2643 
2644 	return (0);
2645 } /* ixgbe_if_detach */
2646 
2647 /************************************************************************
2648  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2649  *
2650  *   Prepare the adapter/port for LPLU and/or WoL
2651  ************************************************************************/
2652 static int
ixgbe_setup_low_power_mode(if_ctx_t ctx)2653 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2654 {
2655 	struct adapter  *adapter = iflib_get_softc(ctx);
2656 	struct ixgbe_hw *hw = &adapter->hw;
2657 	device_t        dev = iflib_get_dev(ctx);
2658 	s32             error = 0;
2659 
2660 	if (!hw->wol_enabled)
2661 		ixgbe_set_phy_power(hw, FALSE);
2662 
2663 	/* Limit power management flow to X550EM baseT */
2664 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2665 	    hw->phy.ops.enter_lplu) {
2666 		/* Turn off support for APM wakeup. (Using ACPI instead) */
2667 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
2668 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2669 
2670 		/*
2671 		 * Clear Wake Up Status register to prevent any previous wakeup
2672 		 * events from waking us up immediately after we suspend.
2673 		 */
2674 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2675 
2676 		/*
2677 		 * Program the Wakeup Filter Control register with user filter
2678 		 * settings
2679 		 */
2680 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2681 
2682 		/* Enable wakeups and power management in Wakeup Control */
2683 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
2684 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2685 
2686 		/* X550EM baseT adapters need a special LPLU flow */
2687 		hw->phy.reset_disable = TRUE;
2688 		ixgbe_if_stop(ctx);
2689 		error = hw->phy.ops.enter_lplu(hw);
2690 		if (error)
2691 			device_printf(dev, "Error entering LPLU: %d\n", error);
2692 		hw->phy.reset_disable = FALSE;
2693 	} else {
2694 		/* Just stop for other adapters */
2695 		ixgbe_if_stop(ctx);
2696 	}
2697 
2698 	return error;
2699 } /* ixgbe_setup_low_power_mode */
2700 
2701 /************************************************************************
2702  * ixgbe_shutdown - Shutdown entry point
2703  ************************************************************************/
2704 static int
ixgbe_if_shutdown(if_ctx_t ctx)2705 ixgbe_if_shutdown(if_ctx_t ctx)
2706 {
2707 	int error = 0;
2708 
2709 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
2710 
2711 	error = ixgbe_setup_low_power_mode(ctx);
2712 
2713 	return (error);
2714 } /* ixgbe_if_shutdown */
2715 
2716 /************************************************************************
2717  * ixgbe_suspend
2718  *
2719  *   From D0 to D3
2720  ************************************************************************/
2721 static int
ixgbe_if_suspend(if_ctx_t ctx)2722 ixgbe_if_suspend(if_ctx_t ctx)
2723 {
2724 	int error = 0;
2725 
2726 	INIT_DEBUGOUT("ixgbe_suspend: begin");
2727 
2728 	error = ixgbe_setup_low_power_mode(ctx);
2729 
2730 	return (error);
2731 } /* ixgbe_if_suspend */
2732 
2733 /************************************************************************
2734  * ixgbe_resume
2735  *
2736  *   From D3 to D0
2737  ************************************************************************/
2738 static int
ixgbe_if_resume(if_ctx_t ctx)2739 ixgbe_if_resume(if_ctx_t ctx)
2740 {
2741 	struct adapter  *adapter = iflib_get_softc(ctx);
2742 	device_t        dev = iflib_get_dev(ctx);
2743 	struct ifnet    *ifp = iflib_get_ifp(ctx);
2744 	struct ixgbe_hw *hw = &adapter->hw;
2745 	u32             wus;
2746 
2747 	INIT_DEBUGOUT("ixgbe_resume: begin");
2748 
2749 	/* Read & clear WUS register */
2750 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2751 	if (wus)
2752 		device_printf(dev, "Woken up by (WUS): %#010x\n",
2753 		    IXGBE_READ_REG(hw, IXGBE_WUS));
2754 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2755 	/* And clear WUFC until next low-power transition */
2756 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2757 
2758 	/*
2759 	 * Required after D3->D0 transition;
2760 	 * will re-advertise all previous advertised speeds
2761 	 */
2762 	if (ifp->if_flags & IFF_UP)
2763 		ixgbe_if_init(ctx);
2764 
2765 	return (0);
2766 } /* ixgbe_if_resume */
2767 
2768 /************************************************************************
2769  * ixgbe_if_mtu_set - Ioctl mtu entry point
2770  *
2771  *   Return 0 on success, EINVAL on failure
2772  ************************************************************************/
2773 static int
ixgbe_if_mtu_set(if_ctx_t ctx,uint32_t mtu)2774 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2775 {
2776 	struct adapter *adapter = iflib_get_softc(ctx);
2777 	int error = 0;
2778 
2779 	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
2780 
2781 	if (mtu > IXGBE_MAX_MTU) {
2782 		error = EINVAL;
2783 	} else {
2784 		adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2785 	}
2786 
2787 	return error;
2788 } /* ixgbe_if_mtu_set */
2789 
2790 /************************************************************************
2791  * ixgbe_if_crcstrip_set
2792  ************************************************************************/
2793 static void
ixgbe_if_crcstrip_set(if_ctx_t ctx,int onoff,int crcstrip)2794 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2795 {
2796 	struct adapter *sc = iflib_get_softc(ctx);
2797 	struct ixgbe_hw *hw = &sc->hw;
2798 	/* crc stripping is set in two places:
2799 	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2800 	 * IXGBE_RDRXCTL (set by the original driver in
2801 	 *	ixgbe_setup_hw_rsc() called in init_locked.
2802 	 *	We disable the setting when netmap is compiled in).
2803 	 * We update the values here, but also in ixgbe.c because
2804 	 * init_locked sometimes is called outside our control.
2805 	 */
2806 	uint32_t hl, rxc;
2807 
2808 	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2809 	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2810 #ifdef NETMAP
2811 	if (netmap_verbose)
2812 		D("%s read  HLREG 0x%x rxc 0x%x",
2813 			onoff ? "enter" : "exit", hl, rxc);
2814 #endif
2815 	/* hw requirements ... */
2816 	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2817 	rxc |= IXGBE_RDRXCTL_RSCACKC;
2818 	if (onoff && !crcstrip) {
2819 		/* keep the crc. Fast rx */
2820 		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2821 		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2822 	} else {
2823 		/* reset default mode */
2824 		hl |= IXGBE_HLREG0_RXCRCSTRP;
2825 		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2826 	}
2827 #ifdef NETMAP
2828 	if (netmap_verbose)
2829 		D("%s write HLREG 0x%x rxc 0x%x",
2830 			onoff ? "enter" : "exit", hl, rxc);
2831 #endif
2832 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2833 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2834 } /* ixgbe_if_crcstrip_set */
2835 
2836 /*********************************************************************
2837  * ixgbe_if_init - Init entry point
2838  *
2839  *   Used in two ways: It is used by the stack as an init
2840  *   entry point in network interface structure. It is also
2841  *   used by the driver as a hw/sw initialization routine to
2842  *   get to a consistent state.
2843  *
2844  *   Return 0 on success, positive on failure
2845  **********************************************************************/
2846 void
ixgbe_if_init(if_ctx_t ctx)2847 ixgbe_if_init(if_ctx_t ctx)
2848 {
2849 	struct adapter     *adapter = iflib_get_softc(ctx);
2850 	struct ifnet       *ifp = iflib_get_ifp(ctx);
2851 	device_t           dev = iflib_get_dev(ctx);
2852 	struct ixgbe_hw *hw = &adapter->hw;
2853 	struct ix_rx_queue *rx_que;
2854 	struct ix_tx_queue *tx_que;
2855 	u32             txdctl, mhadd;
2856 	u32             rxdctl, rxctrl;
2857 	u32             ctrl_ext;
2858 
2859 	int             i, j, err;
2860 
2861 	INIT_DEBUGOUT("ixgbe_if_init: begin");
2862 
2863 	/* Queue indices may change with IOV mode */
2864 	ixgbe_align_all_queue_indices(adapter);
2865 
2866 	/* reprogram the RAR[0] in case user changed it. */
2867 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2868 
2869 	/* Get the latest mac address, User can use a LAA */
2870 	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2871 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2872 	hw->addr_ctrl.rar_used_count = 1;
2873 
2874 	ixgbe_init_hw(hw);
2875 
2876 	ixgbe_initialize_iov(adapter);
2877 
2878 	ixgbe_initialize_transmit_units(ctx);
2879 
2880 	/* Setup Multicast table */
2881 	ixgbe_if_multi_set(ctx);
2882 
2883 	/* Determine the correct mbuf pool, based on frame size */
2884 	adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
2885 
2886 	/* Configure RX settings */
2887 	ixgbe_initialize_receive_units(ctx);
2888 
2889 	/*
2890 	 * Initialize variable holding task enqueue requests
2891 	 * from MSI-X interrupts
2892 	 */
2893 	adapter->task_requests = 0;
2894 
2895 	/* Enable SDP & MSI-X interrupts based on adapter */
2896 	ixgbe_config_gpie(adapter);
2897 
2898 	/* Set MTU size */
2899 	if (ifp->if_mtu > ETHERMTU) {
2900 		/* aka IXGBE_MAXFRS on 82599 and newer */
2901 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2902 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
2903 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2904 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2905 	}
2906 
2907 	/* Now enable all the queues */
2908 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
2909 		struct tx_ring *txr = &tx_que->txr;
2910 
2911 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2912 		txdctl |= IXGBE_TXDCTL_ENABLE;
2913 		/* Set WTHRESH to 8, burst writeback */
2914 		txdctl |= (8 << 16);
2915 		/*
2916 		 * When the internal queue falls below PTHRESH (32),
2917 		 * start prefetching as long as there are at least
2918 		 * HTHRESH (1) buffers ready. The values are taken
2919 		 * from the Intel linux driver 3.8.21.
2920 		 * Prefetching enables tx line rate even with 1 queue.
2921 		 */
2922 		txdctl |= (32 << 0) | (1 << 8);
2923 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2924 	}
2925 
2926 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
2927 		struct rx_ring *rxr = &rx_que->rxr;
2928 
2929 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2930 		if (hw->mac.type == ixgbe_mac_82598EB) {
2931 			/*
2932 			 * PTHRESH = 21
2933 			 * HTHRESH = 4
2934 			 * WTHRESH = 8
2935 			 */
2936 			rxdctl &= ~0x3FFFFF;
2937 			rxdctl |= 0x080420;
2938 		}
2939 		rxdctl |= IXGBE_RXDCTL_ENABLE;
2940 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2941 		for (j = 0; j < 10; j++) {
2942 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2943 			    IXGBE_RXDCTL_ENABLE)
2944 				break;
2945 			else
2946 				msec_delay(1);
2947 		}
2948 		wmb();
2949 	}
2950 
2951 	/* Enable Receive engine */
2952 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2953 	if (hw->mac.type == ixgbe_mac_82598EB)
2954 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
2955 	rxctrl |= IXGBE_RXCTRL_RXEN;
2956 	ixgbe_enable_rx_dma(hw, rxctrl);
2957 
2958 	/* Set up MSI/MSI-X routing */
2959 	if (ixgbe_enable_msix)  {
2960 		ixgbe_configure_ivars(adapter);
2961 		/* Set up auto-mask */
2962 		if (hw->mac.type == ixgbe_mac_82598EB)
2963 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2964 		else {
2965 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
2966 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
2967 		}
2968 	} else {  /* Simple settings for Legacy/MSI */
2969 		ixgbe_set_ivar(adapter, 0, 0, 0);
2970 		ixgbe_set_ivar(adapter, 0, 0, 1);
2971 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2972 	}
2973 
2974 	ixgbe_init_fdir(adapter);
2975 
2976 	/*
2977 	 * Check on any SFP devices that
2978 	 * need to be kick-started
2979 	 */
2980 	if (hw->phy.type == ixgbe_phy_none) {
2981 		err = hw->phy.ops.identify(hw);
2982 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
2983 			device_printf(dev,
2984 			    "Unsupported SFP+ module type was detected.\n");
2985 			return;
2986 		}
2987 	}
2988 
2989 	/* Set moderation on the Link interrupt */
2990 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
2991 
2992 	/* Enable power to the phy. */
2993 	ixgbe_set_phy_power(hw, TRUE);
2994 
2995 	/* Config/Enable Link */
2996 	ixgbe_config_link(ctx);
2997 
2998 	/* Hardware Packet Buffer & Flow Control setup */
2999 	ixgbe_config_delay_values(adapter);
3000 
3001 	/* Initialize the FC settings */
3002 	ixgbe_start_hw(hw);
3003 
3004 	/* Set up VLAN support and filter */
3005 	ixgbe_setup_vlan_hw_support(ctx);
3006 
3007 	/* Setup DMA Coalescing */
3008 	ixgbe_config_dmac(adapter);
3009 
3010 	/* And now turn on interrupts */
3011 	ixgbe_if_enable_intr(ctx);
3012 
3013 	/* Enable the use of the MBX by the VF's */
3014 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3015 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3016 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3017 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3018 	}
3019 
3020 } /* ixgbe_init_locked */
3021 
3022 /************************************************************************
3023  * ixgbe_set_ivar
3024  *
3025  *   Setup the correct IVAR register for a particular MSI-X interrupt
3026  *     (yes this is all very magic and confusing :)
3027  *    - entry is the register array entry
3028  *    - vector is the MSI-X vector for this queue
3029  *    - type is RX/TX/MISC
3030  ************************************************************************/
3031 static void
ixgbe_set_ivar(struct adapter * adapter,u8 entry,u8 vector,s8 type)3032 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3033 {
3034 	struct ixgbe_hw *hw = &adapter->hw;
3035 	u32 ivar, index;
3036 
3037 	vector |= IXGBE_IVAR_ALLOC_VAL;
3038 
3039 	switch (hw->mac.type) {
3040 	case ixgbe_mac_82598EB:
3041 		if (type == -1)
3042 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3043 		else
3044 			entry += (type * 64);
3045 		index = (entry >> 2) & 0x1F;
3046 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3047 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3048 		ivar |= (vector << (8 * (entry & 0x3)));
3049 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3050 		break;
3051 	case ixgbe_mac_82599EB:
3052 	case ixgbe_mac_X540:
3053 	case ixgbe_mac_X550:
3054 	case ixgbe_mac_X550EM_x:
3055 	case ixgbe_mac_X550EM_a:
3056 		if (type == -1) { /* MISC IVAR */
3057 			index = (entry & 1) * 8;
3058 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3059 			ivar &= ~(0xFF << index);
3060 			ivar |= (vector << index);
3061 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3062 		} else {          /* RX/TX IVARS */
3063 			index = (16 * (entry & 1)) + (8 * type);
3064 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3065 			ivar &= ~(0xFF << index);
3066 			ivar |= (vector << index);
3067 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3068 		}
3069 	default:
3070 		break;
3071 	}
3072 } /* ixgbe_set_ivar */
3073 
3074 /************************************************************************
3075  * ixgbe_configure_ivars
3076  ************************************************************************/
3077 static void
ixgbe_configure_ivars(struct adapter * adapter)3078 ixgbe_configure_ivars(struct adapter *adapter)
3079 {
3080 	struct ix_rx_queue *rx_que = adapter->rx_queues;
3081 	struct ix_tx_queue *tx_que = adapter->tx_queues;
3082 	u32                newitr;
3083 
3084 	if (ixgbe_max_interrupt_rate > 0)
3085 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3086 	else {
3087 		/*
3088 		 * Disable DMA coalescing if interrupt moderation is
3089 		 * disabled.
3090 		 */
3091 		adapter->dmac = 0;
3092 		newitr = 0;
3093 	}
3094 
3095 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3096 		struct rx_ring *rxr = &rx_que->rxr;
3097 
3098 		/* First the RX queue entry */
3099 		ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3100 
3101 		/* Set an Initial EITR value */
3102 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3103 	}
3104 	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3105 		struct tx_ring *txr = &tx_que->txr;
3106 
3107 		/* ... and the TX */
3108 		ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3109 	}
3110 	/* For the Link interrupt */
3111 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3112 } /* ixgbe_configure_ivars */
3113 
3114 /************************************************************************
3115  * ixgbe_config_gpie
3116  ************************************************************************/
3117 static void
ixgbe_config_gpie(struct adapter * adapter)3118 ixgbe_config_gpie(struct adapter *adapter)
3119 {
3120 	struct ixgbe_hw *hw = &adapter->hw;
3121 	u32             gpie;
3122 
3123 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3124 
3125 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3126 		/* Enable Enhanced MSI-X mode */
3127 		gpie |= IXGBE_GPIE_MSIX_MODE
3128 		     |  IXGBE_GPIE_EIAME
3129 		     |  IXGBE_GPIE_PBA_SUPPORT
3130 		     |  IXGBE_GPIE_OCD;
3131 	}
3132 
3133 	/* Fan Failure Interrupt */
3134 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3135 		gpie |= IXGBE_SDP1_GPIEN;
3136 
3137 	/* Thermal Sensor Interrupt */
3138 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3139 		gpie |= IXGBE_SDP0_GPIEN_X540;
3140 
3141 	/* Link detection */
3142 	switch (hw->mac.type) {
3143 	case ixgbe_mac_82599EB:
3144 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3145 		break;
3146 	case ixgbe_mac_X550EM_x:
3147 	case ixgbe_mac_X550EM_a:
3148 		gpie |= IXGBE_SDP0_GPIEN_X540;
3149 		break;
3150 	default:
3151 		break;
3152 	}
3153 
3154 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3155 
3156 } /* ixgbe_config_gpie */
3157 
3158 /************************************************************************
3159  * ixgbe_config_delay_values
3160  *
3161  *   Requires adapter->max_frame_size to be set.
3162  ************************************************************************/
3163 static void
ixgbe_config_delay_values(struct adapter * adapter)3164 ixgbe_config_delay_values(struct adapter *adapter)
3165 {
3166 	struct ixgbe_hw *hw = &adapter->hw;
3167 	u32             rxpb, frame, size, tmp;
3168 
3169 	frame = adapter->max_frame_size;
3170 
3171 	/* Calculate High Water */
3172 	switch (hw->mac.type) {
3173 	case ixgbe_mac_X540:
3174 	case ixgbe_mac_X550:
3175 	case ixgbe_mac_X550EM_x:
3176 	case ixgbe_mac_X550EM_a:
3177 		tmp = IXGBE_DV_X540(frame, frame);
3178 		break;
3179 	default:
3180 		tmp = IXGBE_DV(frame, frame);
3181 		break;
3182 	}
3183 	size = IXGBE_BT2KB(tmp);
3184 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3185 	hw->fc.high_water[0] = rxpb - size;
3186 
3187 	/* Now calculate Low Water */
3188 	switch (hw->mac.type) {
3189 	case ixgbe_mac_X540:
3190 	case ixgbe_mac_X550:
3191 	case ixgbe_mac_X550EM_x:
3192 	case ixgbe_mac_X550EM_a:
3193 		tmp = IXGBE_LOW_DV_X540(frame);
3194 		break;
3195 	default:
3196 		tmp = IXGBE_LOW_DV(frame);
3197 		break;
3198 	}
3199 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3200 
3201 	hw->fc.pause_time = IXGBE_FC_PAUSE;
3202 	hw->fc.send_xon = TRUE;
3203 } /* ixgbe_config_delay_values */
3204 
3205 /************************************************************************
3206  * ixgbe_set_multi - Multicast Update
3207  *
3208  *   Called whenever multicast address list is updated.
3209  ************************************************************************/
3210 static int
ixgbe_mc_filter_apply(void * arg,struct ifmultiaddr * ifma,int count)3211 ixgbe_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count)
3212 {
3213 	struct adapter *adapter = arg;
3214 	struct ixgbe_mc_addr *mta = adapter->mta;
3215 
3216 	if (ifma->ifma_addr->sa_family != AF_LINK)
3217 		return (0);
3218 	if (count == MAX_NUM_MULTICAST_ADDRESSES)
3219 		return (0);
3220 	bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
3221 	    mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3222 	mta[count].vmdq = adapter->pool;
3223 
3224 	return (1);
3225 } /* ixgbe_mc_filter_apply */
3226 
3227 static void
ixgbe_if_multi_set(if_ctx_t ctx)3228 ixgbe_if_multi_set(if_ctx_t ctx)
3229 {
3230 	struct adapter       *adapter = iflib_get_softc(ctx);
3231 	struct ixgbe_mc_addr *mta;
3232 	struct ifnet         *ifp = iflib_get_ifp(ctx);
3233 	u8                   *update_ptr;
3234 	int                  mcnt = 0;
3235 	u32                  fctrl;
3236 
3237 	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3238 
3239 	mta = adapter->mta;
3240 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3241 
3242 	mcnt = if_multi_apply(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, adapter);
3243 
3244 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3245 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3246 	if (ifp->if_flags & IFF_PROMISC)
3247 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3248 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3249 	    ifp->if_flags & IFF_ALLMULTI) {
3250 		fctrl |= IXGBE_FCTRL_MPE;
3251 		fctrl &= ~IXGBE_FCTRL_UPE;
3252 	} else
3253 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3254 
3255 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3256 
3257 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3258 		update_ptr = (u8 *)mta;
3259 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3260 		    ixgbe_mc_array_itr, TRUE);
3261 	}
3262 
3263 } /* ixgbe_if_multi_set */
3264 
3265 /************************************************************************
3266  * ixgbe_mc_array_itr
3267  *
3268  *   An iterator function needed by the multicast shared code.
3269  *   It feeds the shared code routine the addresses in the
3270  *   array of ixgbe_set_multi() one by one.
3271  ************************************************************************/
3272 static u8 *
ixgbe_mc_array_itr(struct ixgbe_hw * hw,u8 ** update_ptr,u32 * vmdq)3273 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3274 {
3275 	struct ixgbe_mc_addr *mta;
3276 
3277 	mta = (struct ixgbe_mc_addr *)*update_ptr;
3278 	*vmdq = mta->vmdq;
3279 
3280 	*update_ptr = (u8*)(mta + 1);
3281 
3282 	return (mta->addr);
3283 } /* ixgbe_mc_array_itr */
3284 
3285 /************************************************************************
3286  * ixgbe_local_timer - Timer routine
3287  *
3288  *   Checks for link status, updates statistics,
3289  *   and runs the watchdog check.
3290  ************************************************************************/
3291 static void
ixgbe_if_timer(if_ctx_t ctx,uint16_t qid)3292 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3293 {
3294 	struct adapter *adapter = iflib_get_softc(ctx);
3295 
3296 	if (qid != 0)
3297 		return;
3298 
3299 	/* Check for pluggable optics */
3300 	if (adapter->sfp_probe)
3301 		if (!ixgbe_sfp_probe(ctx))
3302 			return; /* Nothing to do */
3303 
3304 	ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3305 	    &adapter->link_up, 0);
3306 
3307 	/* Fire off the adminq task */
3308 	iflib_admin_intr_deferred(ctx);
3309 
3310 } /* ixgbe_if_timer */
3311 
3312 /************************************************************************
3313  * ixgbe_sfp_probe
3314  *
3315  *   Determine if a port had optics inserted.
3316  ************************************************************************/
3317 static bool
ixgbe_sfp_probe(if_ctx_t ctx)3318 ixgbe_sfp_probe(if_ctx_t ctx)
3319 {
3320 	struct adapter  *adapter = iflib_get_softc(ctx);
3321 	struct ixgbe_hw *hw = &adapter->hw;
3322 	device_t        dev = iflib_get_dev(ctx);
3323 	bool            result = FALSE;
3324 
3325 	if ((hw->phy.type == ixgbe_phy_nl) &&
3326 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3327 		s32 ret = hw->phy.ops.identify_sfp(hw);
3328 		if (ret)
3329 			goto out;
3330 		ret = hw->phy.ops.reset(hw);
3331 		adapter->sfp_probe = FALSE;
3332 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3333 			device_printf(dev, "Unsupported SFP+ module detected!");
3334 			device_printf(dev,
3335 			    "Reload driver with supported module.\n");
3336 			goto out;
3337 		} else
3338 			device_printf(dev, "SFP+ module detected!\n");
3339 		/* We now have supported optics */
3340 		result = TRUE;
3341 	}
3342 out:
3343 
3344 	return (result);
3345 } /* ixgbe_sfp_probe */
3346 
/************************************************************************
 * ixgbe_handle_mod - Tasklet for SFP module interrupts
 *
 *   Identifies a newly seated SFP/SFP+ module and performs the
 *   MAC-specific module setup.  On success, requests the MSF task so
 *   link can be renegotiated; on any failure (cage empty, unsupported
 *   module, setup error) any pending MSF request is cleared instead.
 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	if_ctx_t        ctx = context;
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	device_t        dev = iflib_get_dev(ctx);
	u32             err, cage_full = 0;

	if (adapter->hw.need_crosstalk_fix) {
		/*
		 * On parts flagged with need_crosstalk_fix, verify a
		 * module is physically present before identifying it:
		 * cage presence is read from ESDP — SDP2 on 82599,
		 * SDP0 on X550EM variants.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			/* Other MACs: no presence pin checked here */
			break;
		}

		if (!cage_full)
			goto handle_mod_out;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		goto handle_mod_out;
	}

	/* 82598 resets the PHY; newer MACs run their SFP setup hook */
	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		goto handle_mod_out;
	}
	/* Module usable: ask the admin path to run the MSF task next */
	adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
	return;

handle_mod_out:
	/* Module absent or unsupported: drop any pending MSF request */
	adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
} /* ixgbe_handle_mod */
3401 
3402 
/************************************************************************
 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
 *
 *   Re-establishes link after a module event: refreshes the cached
 *   PHY layer, re-runs link setup with the advertised speeds (or the
 *   full capability set when nothing is advertised), and rebuilds the
 *   ifmedia list so ifconfig reflects the new optics.
 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	if_ctx_t        ctx = context;
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	u32             autoneg;
	bool            negotiate;

	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/* Nothing advertised: fall back to everything the HW can do */
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(adapter->media);
	ixgbe_add_media_types(adapter->ctx);
	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */
3429 
3430 /************************************************************************
3431  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3432  ************************************************************************/
3433 static void
ixgbe_handle_phy(void * context)3434 ixgbe_handle_phy(void *context)
3435 {
3436 	if_ctx_t        ctx = context;
3437 	struct adapter  *adapter = iflib_get_softc(ctx);
3438 	struct ixgbe_hw *hw = &adapter->hw;
3439 	int             error;
3440 
3441 	error = hw->phy.ops.handle_lasi(hw);
3442 	if (error == IXGBE_ERR_OVERTEMP)
3443 		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3444 	else if (error)
3445 		device_printf(adapter->dev,
3446 		    "Error handling LASI interrupt: %d\n", error);
3447 } /* ixgbe_handle_phy */
3448 
/************************************************************************
 * ixgbe_if_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixgbe_if_stop(if_ctx_t ctx)
{
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;

	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");

	ixgbe_reset_hw(hw);
	/*
	 * Clear the stopped flag before ixgbe_stop_adapter() so that
	 * call runs its full stop sequence — NOTE(review): presumably
	 * the shared code short-circuits when adapter_stopped is
	 * already set; confirm against ixgbe_stop_adapter().
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_if_update_admin_status(ctx);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_if_stop */
3480 
/************************************************************************
 * ixgbe_update_link_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixgbe_if_update_admin_status(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	device_t       dev = iflib_get_dev(ctx);

	if (adapter->link_up) {
		/* Act only on a down -> up transition */
		if (adapter->link_active == FALSE) {
			/*
			 * NOTE(review): a link_speed of 128 is reported
			 * as 10 Gbps, anything else as 1 Gbps — confirm
			 * against the ixgbe_link_speed encoding.
			 */
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s \n",
				    ((adapter->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			/* should actually be negotiated value */
			iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else { /* Link down */
		/* Act only on an up -> down transition */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
			adapter->link_active = FALSE;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	}

	/* Handle task requests from msix_link() */
	if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD)
		ixgbe_handle_mod(ctx);
	if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF)
		ixgbe_handle_msf(ctx);
	if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX)
		ixgbe_handle_mbx(ctx);
	if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR)
		ixgbe_reinit_fdir(ctx);
	if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY)
		ixgbe_handle_phy(ctx);
	/* All pending requests have been serviced */
	adapter->task_requests = 0;

	ixgbe_update_stats_counters(adapter);
} /* ixgbe_if_update_admin_status */
3537 
3538 /************************************************************************
3539  * ixgbe_config_dmac - Configure DMA Coalescing
3540  ************************************************************************/
3541 static void
ixgbe_config_dmac(struct adapter * adapter)3542 ixgbe_config_dmac(struct adapter *adapter)
3543 {
3544 	struct ixgbe_hw          *hw = &adapter->hw;
3545 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3546 
3547 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3548 		return;
3549 
3550 	if (dcfg->watchdog_timer ^ adapter->dmac ||
3551 	    dcfg->link_speed ^ adapter->link_speed) {
3552 		dcfg->watchdog_timer = adapter->dmac;
3553 		dcfg->fcoe_en = FALSE;
3554 		dcfg->link_speed = adapter->link_speed;
3555 		dcfg->num_tcs = 1;
3556 
3557 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3558 		    dcfg->watchdog_timer, dcfg->link_speed);
3559 
3560 		hw->mac.ops.dmac_config(hw);
3561 	}
3562 } /* ixgbe_config_dmac */
3563 
/************************************************************************
 * ixgbe_if_enable_intr - Enable all interrupt sources
 *
 *   Builds and programs the EIMS "other causes" mask for the current
 *   MAC, sets up EIAC auto-clear for MSI-X, and unmasks every RX
 *   queue vector.
 ************************************************************************/
void
ixgbe_if_enable_intr(if_ctx_t ctx)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw    *hw = &adapter->hw;
	struct ix_rx_queue *que = adapter->rx_queues;
	u32                mask, fwsm;

	/* Start from all enable bits minus the per-queue ones */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add MAC-specific causes (ECC, thermal sensor, SDP pins) */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->intr_type == IFLIB_INTR_MSIX) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_if_enable_intr */
3648 
3649 /************************************************************************
3650  * ixgbe_disable_intr
3651  ************************************************************************/
3652 static void
ixgbe_if_disable_intr(if_ctx_t ctx)3653 ixgbe_if_disable_intr(if_ctx_t ctx)
3654 {
3655 	struct adapter *adapter = iflib_get_softc(ctx);
3656 
3657 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3658 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3659 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3660 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3661 	} else {
3662 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3663 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3664 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3665 	}
3666 	IXGBE_WRITE_FLUSH(&adapter->hw);
3667 
3668 } /* ixgbe_if_disable_intr */
3669 
3670 /************************************************************************
3671  * ixgbe_link_intr_enable
3672  ************************************************************************/
3673 static void
ixgbe_link_intr_enable(if_ctx_t ctx)3674 ixgbe_link_intr_enable(if_ctx_t ctx)
3675 {
3676 	struct ixgbe_hw *hw = &((struct adapter *)iflib_get_softc(ctx))->hw;
3677 
3678 	/* Re-enable other interrupts */
3679 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3680 } /* ixgbe_link_intr_enable */
3681 
3682 /************************************************************************
3683  * ixgbe_if_rx_queue_intr_enable
3684  ************************************************************************/
3685 static int
ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx,uint16_t rxqid)3686 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3687 {
3688 	struct adapter     *adapter = iflib_get_softc(ctx);
3689 	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
3690 
3691 	ixgbe_enable_queue(adapter, que->rxr.me);
3692 
3693 	return (0);
3694 } /* ixgbe_if_rx_queue_intr_enable */
3695 
3696 /************************************************************************
3697  * ixgbe_enable_queue
3698  ************************************************************************/
3699 static void
ixgbe_enable_queue(struct adapter * adapter,u32 vector)3700 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3701 {
3702 	struct ixgbe_hw *hw = &adapter->hw;
3703 	u64             queue = (u64)(1 << vector);
3704 	u32             mask;
3705 
3706 	if (hw->mac.type == ixgbe_mac_82598EB) {
3707 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3708 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3709 	} else {
3710 		mask = (queue & 0xFFFFFFFF);
3711 		if (mask)
3712 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3713 		mask = (queue >> 32);
3714 		if (mask)
3715 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3716 	}
3717 } /* ixgbe_enable_queue */
3718 
3719 /************************************************************************
3720  * ixgbe_disable_queue
3721  ************************************************************************/
3722 static void
ixgbe_disable_queue(struct adapter * adapter,u32 vector)3723 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3724 {
3725 	struct ixgbe_hw *hw = &adapter->hw;
3726 	u64             queue = (u64)(1 << vector);
3727 	u32             mask;
3728 
3729 	if (hw->mac.type == ixgbe_mac_82598EB) {
3730 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3731 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3732 	} else {
3733 		mask = (queue & 0xFFFFFFFF);
3734 		if (mask)
3735 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3736 		mask = (queue >> 32);
3737 		if (mask)
3738 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3739 	}
3740 } /* ixgbe_disable_queue */
3741 
/************************************************************************
 * ixgbe_intr - Legacy Interrupt Service Routine
 *
 *   Reads and decodes EICR: fan failure, link state change, SFP
 *   module / multispeed-fiber events, and external PHY interrupts.
 *   Deferred work is flagged in adapter->task_requests for the admin
 *   path; queue work is signalled by returning FILTER_SCHEDULE_THREAD.
 ************************************************************************/
int
ixgbe_intr(void *arg)
{
	struct adapter     *adapter = arg;
	struct ix_rx_queue *que = adapter->rx_queues;
	struct ixgbe_hw    *hw = &adapter->hw;
	if_ctx_t           ctx = adapter->ctx;
	u32                eicr, eicr_mask;

	/* Reading EICR also clears the asserted cause bits */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	/* No cause bits set: not ours — re-enable and bail */
	if (eicr == 0) {
		ixgbe_if_enable_intr(ctx);
		return (FILTER_HANDLED);
	}

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(adapter->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change: mask LSC and let the admin task handle it */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		iflib_admin_intr_deferred(ctx);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: request the MOD task */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
		}

		/* 82599 multispeed fiber event: request the MSF task */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;

	return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_intr */
3803 
3804 /************************************************************************
3805  * ixgbe_free_pci_resources
3806  ************************************************************************/
3807 static void
ixgbe_free_pci_resources(if_ctx_t ctx)3808 ixgbe_free_pci_resources(if_ctx_t ctx)
3809 {
3810 	struct adapter *adapter = iflib_get_softc(ctx);
3811 	struct         ix_rx_queue *que = adapter->rx_queues;
3812 	device_t       dev = iflib_get_dev(ctx);
3813 
3814 	/* Release all MSI-X queue resources */
3815 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3816 		iflib_irq_free(ctx, &adapter->irq);
3817 
3818 	if (que != NULL) {
3819 		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3820 			iflib_irq_free(ctx, &que->que_irq);
3821 		}
3822 	}
3823 
3824 	if (adapter->pci_mem != NULL)
3825 		bus_release_resource(dev, SYS_RES_MEMORY,
3826 		    rman_get_rid(adapter->pci_mem), adapter->pci_mem);
3827 } /* ixgbe_free_pci_resources */
3828 
3829 /************************************************************************
3830  * ixgbe_sysctl_flowcntl
3831  *
3832  *   SYSCTL wrapper around setting Flow Control
3833  ************************************************************************/
3834 static int
ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)3835 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3836 {
3837 	struct adapter *adapter;
3838 	int            error, fc;
3839 
3840 	adapter = (struct adapter *)arg1;
3841 	fc = adapter->hw.fc.current_mode;
3842 
3843 	error = sysctl_handle_int(oidp, &fc, 0, req);
3844 	if ((error) || (req->newptr == NULL))
3845 		return (error);
3846 
3847 	/* Don't bother if it's not changed */
3848 	if (fc == adapter->hw.fc.current_mode)
3849 		return (0);
3850 
3851 	return ixgbe_set_flowcntl(adapter, fc);
3852 } /* ixgbe_sysctl_flowcntl */
3853 
3854 /************************************************************************
3855  * ixgbe_set_flowcntl - Set flow control
3856  *
3857  *   Flow control values:
3858  *     0 - off
3859  *     1 - rx pause
3860  *     2 - tx pause
3861  *     3 - full
3862  ************************************************************************/
3863 static int
ixgbe_set_flowcntl(struct adapter * adapter,int fc)3864 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3865 {
3866 	switch (fc) {
3867 	case ixgbe_fc_rx_pause:
3868 	case ixgbe_fc_tx_pause:
3869 	case ixgbe_fc_full:
3870 		adapter->hw.fc.requested_mode = fc;
3871 		if (adapter->num_rx_queues > 1)
3872 			ixgbe_disable_rx_drop(adapter);
3873 		break;
3874 	case ixgbe_fc_none:
3875 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
3876 		if (adapter->num_rx_queues > 1)
3877 			ixgbe_enable_rx_drop(adapter);
3878 		break;
3879 	default:
3880 		return (EINVAL);
3881 	}
3882 
3883 	/* Don't autoneg if forcing a value */
3884 	adapter->hw.fc.disable_fc_autoneg = TRUE;
3885 	ixgbe_fc_enable(&adapter->hw);
3886 
3887 	return (0);
3888 } /* ixgbe_set_flowcntl */
3889 
3890 /************************************************************************
3891  * ixgbe_enable_rx_drop
3892  *
3893  *   Enable the hardware to drop packets when the buffer is
3894  *   full. This is useful with multiqueue, so that no single
3895  *   queue being full stalls the entire RX engine. We only
3896  *   enable this when Multiqueue is enabled AND Flow Control
3897  *   is disabled.
3898  ************************************************************************/
3899 static void
ixgbe_enable_rx_drop(struct adapter * adapter)3900 ixgbe_enable_rx_drop(struct adapter *adapter)
3901 {
3902 	struct ixgbe_hw *hw = &adapter->hw;
3903 	struct rx_ring  *rxr;
3904 	u32             srrctl;
3905 
3906 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3907 		rxr = &adapter->rx_queues[i].rxr;
3908 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3909 		srrctl |= IXGBE_SRRCTL_DROP_EN;
3910 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3911 	}
3912 
3913 	/* enable drop for each vf */
3914 	for (int i = 0; i < adapter->num_vfs; i++) {
3915 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3916 		                (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
3917 		                IXGBE_QDE_ENABLE));
3918 	}
3919 } /* ixgbe_enable_rx_drop */
3920 
3921 /************************************************************************
3922  * ixgbe_disable_rx_drop
3923  ************************************************************************/
3924 static void
ixgbe_disable_rx_drop(struct adapter * adapter)3925 ixgbe_disable_rx_drop(struct adapter *adapter)
3926 {
3927 	struct ixgbe_hw *hw = &adapter->hw;
3928 	struct rx_ring  *rxr;
3929 	u32             srrctl;
3930 
3931 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3932 		rxr = &adapter->rx_queues[i].rxr;
3933 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3934 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3935 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3936 	}
3937 
3938 	/* disable drop for each vf */
3939 	for (int i = 0; i < adapter->num_vfs; i++) {
3940 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3941 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
3942 	}
3943 } /* ixgbe_disable_rx_drop */
3944 
3945 /************************************************************************
3946  * ixgbe_sysctl_advertise
3947  *
3948  *   SYSCTL wrapper around setting advertised speed
3949  ************************************************************************/
3950 static int
ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)3951 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
3952 {
3953 	struct adapter *adapter;
3954 	int            error, advertise;
3955 
3956 	adapter = (struct adapter *)arg1;
3957 	advertise = adapter->advertise;
3958 
3959 	error = sysctl_handle_int(oidp, &advertise, 0, req);
3960 	if ((error) || (req->newptr == NULL))
3961 		return (error);
3962 
3963 	return ixgbe_set_advertise(adapter, advertise);
3964 } /* ixgbe_sysctl_advertise */
3965 
3966 /************************************************************************
3967  * ixgbe_set_advertise - Control advertised link speed
3968  *
3969  *   Flags:
3970  *     0x1 - advertise 100 Mb
3971  *     0x2 - advertise 1G
3972  *     0x4 - advertise 10G
3973  *     0x8 - advertise 10 Mb (yes, Mb)
3974  ************************************************************************/
3975 static int
ixgbe_set_advertise(struct adapter * adapter,int advertise)3976 ixgbe_set_advertise(struct adapter *adapter, int advertise)
3977 {
3978 	device_t         dev = iflib_get_dev(adapter->ctx);
3979 	struct ixgbe_hw  *hw;
3980 	ixgbe_link_speed speed = 0;
3981 	ixgbe_link_speed link_caps = 0;
3982 	s32              err = IXGBE_NOT_IMPLEMENTED;
3983 	bool             negotiate = FALSE;
3984 
3985 	/* Checks to validate new value */
3986 	if (adapter->advertise == advertise) /* no change */
3987 		return (0);
3988 
3989 	hw = &adapter->hw;
3990 
3991 	/* No speed changes for backplane media */
3992 	if (hw->phy.media_type == ixgbe_media_type_backplane)
3993 		return (ENODEV);
3994 
3995 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
3996 	      (hw->phy.multispeed_fiber))) {
3997 		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
3998 		return (EINVAL);
3999 	}
4000 
4001 	if (advertise < 0x1 || advertise > 0xF) {
4002 		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4003 		return (EINVAL);
4004 	}
4005 
4006 	if (hw->mac.ops.get_link_capabilities) {
4007 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4008 		    &negotiate);
4009 		if (err != IXGBE_SUCCESS) {
4010 			device_printf(dev, "Unable to determine supported advertise speeds\n");
4011 			return (ENODEV);
4012 		}
4013 	}
4014 
4015 	/* Set new value and report new advertised mode */
4016 	if (advertise & 0x1) {
4017 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4018 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4019 			return (EINVAL);
4020 		}
4021 		speed |= IXGBE_LINK_SPEED_100_FULL;
4022 	}
4023 	if (advertise & 0x2) {
4024 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4025 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4026 			return (EINVAL);
4027 		}
4028 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4029 	}
4030 	if (advertise & 0x4) {
4031 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4032 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4033 			return (EINVAL);
4034 		}
4035 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4036 	}
4037 	if (advertise & 0x8) {
4038 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4039 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4040 			return (EINVAL);
4041 		}
4042 		speed |= IXGBE_LINK_SPEED_10_FULL;
4043 	}
4044 
4045 	hw->mac.autotry_restart = TRUE;
4046 	hw->mac.ops.setup_link(hw, speed, TRUE);
4047 	adapter->advertise = advertise;
4048 
4049 	return (0);
4050 } /* ixgbe_set_advertise */
4051 
4052 /************************************************************************
4053  * ixgbe_get_advertise - Get current advertised speed settings
4054  *
4055  *   Formatted for sysctl usage.
4056  *   Flags:
4057  *     0x1 - advertise 100 Mb
4058  *     0x2 - advertise 1G
4059  *     0x4 - advertise 10G
4060  *     0x8 - advertise 10 Mb (yes, Mb)
4061  ************************************************************************/
4062 static int
ixgbe_get_advertise(struct adapter * adapter)4063 ixgbe_get_advertise(struct adapter *adapter)
4064 {
4065 	struct ixgbe_hw  *hw = &adapter->hw;
4066 	int              speed;
4067 	ixgbe_link_speed link_caps = 0;
4068 	s32              err;
4069 	bool             negotiate = FALSE;
4070 
4071 	/*
4072 	 * Advertised speed means nothing unless it's copper or
4073 	 * multi-speed fiber
4074 	 */
4075 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4076 	    !(hw->phy.multispeed_fiber))
4077 		return (0);
4078 
4079 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4080 	if (err != IXGBE_SUCCESS)
4081 		return (0);
4082 
4083 	speed =
4084 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4085 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4086 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4087 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4088 
4089 	return speed;
4090 } /* ixgbe_get_advertise */
4091 
4092 /************************************************************************
4093  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4094  *
4095  *   Control values:
4096  *     0/1 - off / on (use default value of 1000)
4097  *
4098  *     Legal timer values are:
4099  *     50,100,250,500,1000,2000,5000,10000
4100  *
4101  *     Turning off interrupt moderation will also turn this off.
4102  ************************************************************************/
4103 static int
ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)4104 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4105 {
4106 	struct adapter *adapter = (struct adapter *)arg1;
4107 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4108 	int            error;
4109 	u16            newval;
4110 
4111 	newval = adapter->dmac;
4112 	error = sysctl_handle_16(oidp, &newval, 0, req);
4113 	if ((error) || (req->newptr == NULL))
4114 		return (error);
4115 
4116 	switch (newval) {
4117 	case 0:
4118 		/* Disabled */
4119 		adapter->dmac = 0;
4120 		break;
4121 	case 1:
4122 		/* Enable and use default */
4123 		adapter->dmac = 1000;
4124 		break;
4125 	case 50:
4126 	case 100:
4127 	case 250:
4128 	case 500:
4129 	case 1000:
4130 	case 2000:
4131 	case 5000:
4132 	case 10000:
4133 		/* Legal values - allow */
4134 		adapter->dmac = newval;
4135 		break;
4136 	default:
4137 		/* Do nothing, illegal value */
4138 		return (EINVAL);
4139 	}
4140 
4141 	/* Re-initialize hardware if it's already running */
4142 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4143 		ifp->if_init(ifp);
4144 
4145 	return (0);
4146 } /* ixgbe_sysctl_dmac */
4147 
4148 #ifdef IXGBE_DEBUG
4149 /************************************************************************
4150  * ixgbe_sysctl_power_state
4151  *
4152  *   Sysctl to test power states
4153  *   Values:
4154  *     0      - set device to D0
4155  *     3      - set device to D3
4156  *     (none) - get current device power state
4157  ************************************************************************/
4158 static int
ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)4159 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4160 {
4161 	struct adapter *adapter = (struct adapter *)arg1;
4162 	device_t       dev = adapter->dev;
4163 	int            curr_ps, new_ps, error = 0;
4164 
4165 	curr_ps = new_ps = pci_get_powerstate(dev);
4166 
4167 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4168 	if ((error) || (req->newptr == NULL))
4169 		return (error);
4170 
4171 	if (new_ps == curr_ps)
4172 		return (0);
4173 
4174 	if (new_ps == 3 && curr_ps == 0)
4175 		error = DEVICE_SUSPEND(dev);
4176 	else if (new_ps == 0 && curr_ps == 3)
4177 		error = DEVICE_RESUME(dev);
4178 	else
4179 		return (EINVAL);
4180 
4181 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4182 
4183 	return (error);
4184 } /* ixgbe_sysctl_power_state */
4185 #endif
4186 
4187 /************************************************************************
4188  * ixgbe_sysctl_wol_enable
4189  *
4190  *   Sysctl to enable/disable the WoL capability,
4191  *   if supported by the adapter.
4192  *
4193  *   Values:
4194  *     0 - disabled
4195  *     1 - enabled
4196  ************************************************************************/
4197 static int
ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)4198 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4199 {
4200 	struct adapter  *adapter = (struct adapter *)arg1;
4201 	struct ixgbe_hw *hw = &adapter->hw;
4202 	int             new_wol_enabled;
4203 	int             error = 0;
4204 
4205 	new_wol_enabled = hw->wol_enabled;
4206 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4207 	if ((error) || (req->newptr == NULL))
4208 		return (error);
4209 	new_wol_enabled = !!(new_wol_enabled);
4210 	if (new_wol_enabled == hw->wol_enabled)
4211 		return (0);
4212 
4213 	if (new_wol_enabled > 0 && !adapter->wol_support)
4214 		return (ENODEV);
4215 	else
4216 		hw->wol_enabled = new_wol_enabled;
4217 
4218 	return (0);
4219 } /* ixgbe_sysctl_wol_enable */
4220 
4221 /************************************************************************
4222  * ixgbe_sysctl_wufc - Wake Up Filter Control
4223  *
4224  *   Sysctl to enable/disable the types of packets that the
4225  *   adapter will wake up on upon receipt.
4226  *   Flags:
4227  *     0x1  - Link Status Change
4228  *     0x2  - Magic Packet
4229  *     0x4  - Direct Exact
4230  *     0x8  - Directed Multicast
4231  *     0x10 - Broadcast
4232  *     0x20 - ARP/IPv4 Request Packet
4233  *     0x40 - Direct IPv4 Packet
4234  *     0x80 - Direct IPv6 Packet
4235  *
4236  *   Settings not listed above will cause the sysctl to return an error.
4237  ************************************************************************/
4238 static int
ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)4239 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4240 {
4241 	struct adapter *adapter = (struct adapter *)arg1;
4242 	int            error = 0;
4243 	u32            new_wufc;
4244 
4245 	new_wufc = adapter->wufc;
4246 
4247 	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4248 	if ((error) || (req->newptr == NULL))
4249 		return (error);
4250 	if (new_wufc == adapter->wufc)
4251 		return (0);
4252 
4253 	if (new_wufc & 0xffffff00)
4254 		return (EINVAL);
4255 
4256 	new_wufc &= 0xff;
4257 	new_wufc |= (0xffffff & adapter->wufc);
4258 	adapter->wufc = new_wufc;
4259 
4260 	return (0);
4261 } /* ixgbe_sysctl_wufc */
4262 
4263 #ifdef IXGBE_DEBUG
4264 /************************************************************************
4265  * ixgbe_sysctl_print_rss_config
4266  ************************************************************************/
4267 static int
ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)4268 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4269 {
4270 	struct adapter  *adapter = (struct adapter *)arg1;
4271 	struct ixgbe_hw *hw = &adapter->hw;
4272 	device_t        dev = adapter->dev;
4273 	struct sbuf     *buf;
4274 	int             error = 0, reta_size;
4275 	u32             reg;
4276 
4277 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4278 	if (!buf) {
4279 		device_printf(dev, "Could not allocate sbuf for output.\n");
4280 		return (ENOMEM);
4281 	}
4282 
4283 	// TODO: use sbufs to make a string to print out
4284 	/* Set multiplier for RETA setup and table size based on MAC */
4285 	switch (adapter->hw.mac.type) {
4286 	case ixgbe_mac_X550:
4287 	case ixgbe_mac_X550EM_x:
4288 	case ixgbe_mac_X550EM_a:
4289 		reta_size = 128;
4290 		break;
4291 	default:
4292 		reta_size = 32;
4293 		break;
4294 	}
4295 
4296 	/* Print out the redirection table */
4297 	sbuf_cat(buf, "\n");
4298 	for (int i = 0; i < reta_size; i++) {
4299 		if (i < 32) {
4300 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4301 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4302 		} else {
4303 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4304 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4305 		}
4306 	}
4307 
4308 	// TODO: print more config
4309 
4310 	error = sbuf_finish(buf);
4311 	if (error)
4312 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4313 
4314 	sbuf_delete(buf);
4315 
4316 	return (0);
4317 } /* ixgbe_sysctl_print_rss_config */
4318 #endif /* IXGBE_DEBUG */
4319 
4320 /************************************************************************
4321  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4322  *
4323  *   For X552/X557-AT devices using an external PHY
4324  ************************************************************************/
4325 static int
ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)4326 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4327 {
4328 	struct adapter  *adapter = (struct adapter *)arg1;
4329 	struct ixgbe_hw *hw = &adapter->hw;
4330 	u16             reg;
4331 
4332 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4333 		device_printf(iflib_get_dev(adapter->ctx),
4334 		    "Device has no supported external thermal sensor.\n");
4335 		return (ENODEV);
4336 	}
4337 
4338 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4339 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4340 		device_printf(iflib_get_dev(adapter->ctx),
4341 		    "Error reading from PHY's current temperature register\n");
4342 		return (EAGAIN);
4343 	}
4344 
4345 	/* Shift temp for output */
4346 	reg = reg >> 8;
4347 
4348 	return (sysctl_handle_16(oidp, NULL, reg, req));
4349 } /* ixgbe_sysctl_phy_temp */
4350 
4351 /************************************************************************
4352  * ixgbe_sysctl_phy_overtemp_occurred
4353  *
4354  *   Reports (directly from the PHY) whether the current PHY
4355  *   temperature is over the overtemp threshold.
4356  ************************************************************************/
4357 static int
ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)4358 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4359 {
4360 	struct adapter  *adapter = (struct adapter *)arg1;
4361 	struct ixgbe_hw *hw = &adapter->hw;
4362 	u16             reg;
4363 
4364 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4365 		device_printf(iflib_get_dev(adapter->ctx),
4366 		    "Device has no supported external thermal sensor.\n");
4367 		return (ENODEV);
4368 	}
4369 
4370 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4371 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4372 		device_printf(iflib_get_dev(adapter->ctx),
4373 		    "Error reading from PHY's temperature status register\n");
4374 		return (EAGAIN);
4375 	}
4376 
4377 	/* Get occurrence bit */
4378 	reg = !!(reg & 0x4000);
4379 
4380 	return (sysctl_handle_16(oidp, 0, reg, req));
4381 } /* ixgbe_sysctl_phy_overtemp_occurred */
4382 
4383 /************************************************************************
4384  * ixgbe_sysctl_eee_state
4385  *
4386  *   Sysctl to set EEE power saving feature
4387  *   Values:
4388  *     0      - disable EEE
4389  *     1      - enable EEE
4390  *     (none) - get current device EEE state
4391  ************************************************************************/
4392 static int
ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)4393 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4394 {
4395 	struct adapter *adapter = (struct adapter *)arg1;
4396 	device_t       dev = adapter->dev;
4397 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4398 	int            curr_eee, new_eee, error = 0;
4399 	s32            retval;
4400 
4401 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4402 
4403 	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4404 	if ((error) || (req->newptr == NULL))
4405 		return (error);
4406 
4407 	/* Nothing to do */
4408 	if (new_eee == curr_eee)
4409 		return (0);
4410 
4411 	/* Not supported */
4412 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4413 		return (EINVAL);
4414 
4415 	/* Bounds checking */
4416 	if ((new_eee < 0) || (new_eee > 1))
4417 		return (EINVAL);
4418 
4419 	retval = ixgbe_setup_eee(&adapter->hw, new_eee);
4420 	if (retval) {
4421 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4422 		return (EINVAL);
4423 	}
4424 
4425 	/* Restart auto-neg */
4426 	ifp->if_init(ifp);
4427 
4428 	device_printf(dev, "New EEE state: %d\n", new_eee);
4429 
4430 	/* Cache new value */
4431 	if (new_eee)
4432 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4433 	else
4434 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4435 
4436 	return (error);
4437 } /* ixgbe_sysctl_eee_state */
4438 
4439 /************************************************************************
4440  * ixgbe_init_device_features
4441  ************************************************************************/
4442 static void
ixgbe_init_device_features(struct adapter * adapter)4443 ixgbe_init_device_features(struct adapter *adapter)
4444 {
4445 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
4446 	                  | IXGBE_FEATURE_RSS
4447 	                  | IXGBE_FEATURE_MSI
4448 	                  | IXGBE_FEATURE_MSIX
4449 	                  | IXGBE_FEATURE_LEGACY_IRQ;
4450 
4451 	/* Set capabilities first... */
4452 	switch (adapter->hw.mac.type) {
4453 	case ixgbe_mac_82598EB:
4454 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4455 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4456 		break;
4457 	case ixgbe_mac_X540:
4458 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4459 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4460 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4461 		    (adapter->hw.bus.func == 0))
4462 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4463 		break;
4464 	case ixgbe_mac_X550:
4465 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4466 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4467 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4468 		break;
4469 	case ixgbe_mac_X550EM_x:
4470 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4471 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4472 		break;
4473 	case ixgbe_mac_X550EM_a:
4474 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4475 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4476 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4477 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4478 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4479 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4480 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4481 		}
4482 		break;
4483 	case ixgbe_mac_82599EB:
4484 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4485 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4486 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4487 		    (adapter->hw.bus.func == 0))
4488 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4489 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4490 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4491 		break;
4492 	default:
4493 		break;
4494 	}
4495 
4496 	/* Enabled by default... */
4497 	/* Fan failure detection */
4498 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4499 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4500 	/* Netmap */
4501 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4502 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4503 	/* EEE */
4504 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4505 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4506 	/* Thermal Sensor */
4507 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4508 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4509 
4510 	/* Enabled via global sysctl... */
4511 	/* Flow Director */
4512 	if (ixgbe_enable_fdir) {
4513 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4514 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
4515 		else
4516 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
4517 	}
4518 	/*
4519 	 * Message Signal Interrupts - Extended (MSI-X)
4520 	 * Normal MSI is only enabled if MSI-X calls fail.
4521 	 */
4522 	if (!ixgbe_enable_msix)
4523 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4524 	/* Receive-Side Scaling (RSS) */
4525 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4526 		adapter->feat_en |= IXGBE_FEATURE_RSS;
4527 
4528 	/* Disable features with unmet dependencies... */
4529 	/* No MSI-X */
4530 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4531 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4532 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4533 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4534 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4535 	}
4536 } /* ixgbe_init_device_features */
4537 
4538 /************************************************************************
4539  * ixgbe_check_fan_failure
4540  ************************************************************************/
4541 static void
ixgbe_check_fan_failure(struct adapter * adapter,u32 reg,bool in_interrupt)4542 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4543 {
4544 	u32 mask;
4545 
4546 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4547 	    IXGBE_ESDP_SDP1;
4548 
4549 	if (reg & mask)
4550 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4551 } /* ixgbe_check_fan_failure */
4552