1 /******************************************************************************
2
3 Copyright (c) 2001-2017, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33
34 #include "opt_inet.h"
35 #include "opt_inet6.h"
36 #include "opt_rss.h"
37
38 #include "ixgbe.h"
39 #include "ixgbe_sriov.h"
40 #include "ifdi_if.h"
41
42 #include <net/netmap.h>
43 #include <dev/netmap/netmap_kern.h>
44
45 /************************************************************************
46 * Driver version
47 ************************************************************************/
48 static const char ixgbe_driver_version[] = "4.0.1-k";
49
50 /************************************************************************
51 * PCI Device ID Table
52 *
 *   Used by iflib_device_probe() to select which devices to attach to.
 *   Each entry pairs a device ID with a human-readable description.
 *   The last entry must be PVID_END.
 *
 *   { Vendor ID, Device ID, Description String }
58 ************************************************************************/
59 static const pci_vendor_info_t ixgbe_vendor_info_array[] =
60 {
61 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) 82598EB AF (Dual Fiber)"),
62 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) 82598EB AF (Fiber)"),
63 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) 82598EB AT (CX4)"),
64 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) 82598EB AT"),
65 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) 82598EB AT2"),
66 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
67 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) 82598EB AF DA (Dual Fiber)"),
68 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) 82598EB AT (Dual CX4)"),
69 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) 82598EB AF (Dual Fiber LR)"),
70 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) 82598EB AF (Dual Fiber SR)"),
71 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) 82598EB LOM"),
72 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) X520 82599 (KX4)"),
73 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) X520 82599 (KX4 Mezzanine)"),
74 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) X520 82599ES (SFI/SFP+)"),
75 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) X520 82599 (XAUI/BX4)"),
76 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) X520 82599 (Dual CX4)"),
77 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) X520-T 82599 LOM"),
78 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS, "Intel(R) X520 82599 LS"),
79 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) X520 82599 (Combined Backplane)"),
80 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) X520 82599 (Backplane w/FCoE)"),
81 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) X520 82599 (Dual SFP+)"),
82 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
83 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) X520-1 82599EN (SFP+)"),
84 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) X520-4 82599 (Quad SFP+)"),
85 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) X520-Q1 82599 (QSFP+)"),
86 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) X540-AT2"),
87 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
88 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
89 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
90 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) X552 (KR Backplane)"),
91 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) X552 (KX4 Backplane)"),
92 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) X552/X557-AT (10GBASE-T)"),
93 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) X552 (1000BASE-T)"),
94 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"),
95 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"),
96 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"),
97 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"),
98 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"),
99 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"),
100 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"),
101 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"),
102 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"),
103 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"),
104 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"),
105 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"),
106 /* required last entry */
107 PVID_END
108 };
109
110 static void *ixgbe_register(device_t);
111 static int ixgbe_if_attach_pre(if_ctx_t);
112 static int ixgbe_if_attach_post(if_ctx_t);
113 static int ixgbe_if_detach(if_ctx_t);
114 static int ixgbe_if_shutdown(if_ctx_t);
115 static int ixgbe_if_suspend(if_ctx_t);
116 static int ixgbe_if_resume(if_ctx_t);
117
118 static void ixgbe_if_stop(if_ctx_t);
119 void ixgbe_if_enable_intr(if_ctx_t);
120 static void ixgbe_if_disable_intr(if_ctx_t);
121 static void ixgbe_link_intr_enable(if_ctx_t);
122 static int ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
123 static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
124 static int ixgbe_if_media_change(if_ctx_t);
125 static int ixgbe_if_msix_intr_assign(if_ctx_t, int);
126 static int ixgbe_if_mtu_set(if_ctx_t, uint32_t);
127 static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
128 static void ixgbe_if_multi_set(if_ctx_t);
129 static int ixgbe_if_promisc_set(if_ctx_t, int);
130 static int ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
131 static int ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
132 static void ixgbe_if_queues_free(if_ctx_t);
133 static void ixgbe_if_timer(if_ctx_t, uint16_t);
134 static void ixgbe_if_update_admin_status(if_ctx_t);
135 static void ixgbe_if_vlan_register(if_ctx_t, u16);
136 static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
137 static int ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
138 static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
139 int ixgbe_intr(void *);
140
141 /************************************************************************
142 * Function prototypes
143 ************************************************************************/
144 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
145
146 static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
147 static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
148 static void ixgbe_add_device_sysctls(if_ctx_t);
149 static int ixgbe_allocate_pci_resources(if_ctx_t);
150 static int ixgbe_setup_low_power_mode(if_ctx_t);
151
152 static void ixgbe_config_dmac(struct ixgbe_softc *);
153 static void ixgbe_configure_ivars(struct ixgbe_softc *);
154 static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
155 static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
156 static bool ixgbe_sfp_probe(if_ctx_t);
157
158 static void ixgbe_free_pci_resources(if_ctx_t);
159
160 static int ixgbe_msix_link(void *);
161 static int ixgbe_msix_que(void *);
162 static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
163 static void ixgbe_initialize_receive_units(if_ctx_t);
164 static void ixgbe_initialize_transmit_units(if_ctx_t);
165
166 static int ixgbe_setup_interface(if_ctx_t);
167 static void ixgbe_init_device_features(struct ixgbe_softc *);
168 static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
169 static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
170 static void ixgbe_print_fw_version(if_ctx_t);
171 static void ixgbe_add_media_types(if_ctx_t);
172 static void ixgbe_update_stats_counters(struct ixgbe_softc *);
173 static void ixgbe_config_link(if_ctx_t);
174 static void ixgbe_get_slot_info(struct ixgbe_softc *);
175 static void ixgbe_fw_mode_timer(void *);
176 static void ixgbe_check_wol_support(struct ixgbe_softc *);
177 static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
178 static void ixgbe_disable_rx_drop(struct ixgbe_softc *);
179
180 static void ixgbe_add_hw_stats(struct ixgbe_softc *);
181 static int ixgbe_set_flowcntl(struct ixgbe_softc *, int);
182 static int ixgbe_set_advertise(struct ixgbe_softc *, int);
183 static int ixgbe_get_default_advertise(struct ixgbe_softc *);
184 static void ixgbe_setup_vlan_hw_support(if_ctx_t);
185 static void ixgbe_config_gpie(struct ixgbe_softc *);
186 static void ixgbe_config_delay_values(struct ixgbe_softc *);
187
188 /* Sysctl handlers */
189 static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
190 static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
191 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
192 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
193 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
194 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
195 static int ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
196 #ifdef IXGBE_DEBUG
197 static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
198 static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
199 #endif
200 static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
201 static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
202 static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
203 static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
204 static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
205 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
206 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
207
208 /* Deferred interrupt tasklets */
209 static void ixgbe_handle_msf(void *);
210 static void ixgbe_handle_mod(void *);
211 static void ixgbe_handle_phy(void *);
212
213 /************************************************************************
214 * FreeBSD Device Interface Entry Points
215 ************************************************************************/
216 static device_method_t ix_methods[] = {
217 /* Device interface */
218 DEVMETHOD(device_register, ixgbe_register),
219 DEVMETHOD(device_probe, iflib_device_probe),
220 DEVMETHOD(device_attach, iflib_device_attach),
221 DEVMETHOD(device_detach, iflib_device_detach),
222 DEVMETHOD(device_shutdown, iflib_device_shutdown),
223 DEVMETHOD(device_suspend, iflib_device_suspend),
224 DEVMETHOD(device_resume, iflib_device_resume),
225 #ifdef PCI_IOV
226 DEVMETHOD(pci_iov_init, iflib_device_iov_init),
227 DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
228 DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
229 #endif /* PCI_IOV */
230 DEVMETHOD_END
231 };
232
233 static driver_t ix_driver = {
234 "ix", ix_methods, sizeof(struct ixgbe_softc),
235 };
236
237 DRIVER_MODULE(ix, pci, ix_driver, 0, 0);
238 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
239 MODULE_DEPEND(ix, pci, 1, 1, 1);
240 MODULE_DEPEND(ix, ether, 1, 1, 1);
241 MODULE_DEPEND(ix, iflib, 1, 1, 1);
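
/*
 * Usage sketch (not part of the driver): with the module metadata above,
 * the PF driver can be loaded as if_ix, e.g. 'kldload if_ix' at runtime
 * or if_ix_load="YES" in /boot/loader.conf; the PNP info lets
 * devmatch(8) load it automatically for the device IDs listed above.
 */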
242
243 static device_method_t ixgbe_if_methods[] = {
244 DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
245 DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
246 DEVMETHOD(ifdi_detach, ixgbe_if_detach),
247 DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
248 DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
249 DEVMETHOD(ifdi_resume, ixgbe_if_resume),
250 DEVMETHOD(ifdi_init, ixgbe_if_init),
251 DEVMETHOD(ifdi_stop, ixgbe_if_stop),
252 DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
253 DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
254 DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
255 DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
256 DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
257 DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
258 DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
259 DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
260 DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
261 DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
262 DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
263 DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
264 DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
265 DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
266 DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
267 DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
268 DEVMETHOD(ifdi_timer, ixgbe_if_timer),
269 DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
270 DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
271 DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
272 DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
273 DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
274 #ifdef PCI_IOV
275 DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
276 DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
277 DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
278 #endif /* PCI_IOV */
279 DEVMETHOD_END
280 };
281
282 /*
283 * TUNEABLE PARAMETERS:
284 */
285
286 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
287 "IXGBE driver parameters");
288 static driver_t ixgbe_if_driver = {
289 "ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
290 };
291
292 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
293 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
294 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
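
/*
 * A worked example of the default above, assuming the customary
 * IXGBE_LOW_LATENCY value of 128 from ixgbe.h: 4000000 / 128 == 31250
 * interrupts per second per vector, i.e. roughly a 32 microsecond
 * moderation floor between interrupts.
 */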
295
296 /* Flow control setting, default to full */
297 static int ixgbe_flow_control = ixgbe_fc_full;
298 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
299 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
300
301 /* Advertise Speed, default to 0 (auto) */
302 static int ixgbe_advertise_speed = 0;
303 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
304 &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
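
/*
 * Illustrative only: ixgbe_set_advertise() treats this as a flag mask
 * (0x1 = 100 Mb, 0x2 = 1 Gb, 0x4 = 10 Gb, ...), so for example
 * hw.ix.advertise_speed=0x6 in loader.conf would request 1 Gb and
 * 10 Gb, while 0 keeps the hardware default of autonegotiation.
 */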
305
/*
 * Smart speed setting, default to on.
 * This only works as a compile-time option right now
 * because it is applied during attach; set this to
 * 'ixgbe_smart_speed_off' to disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;
314
315 /*
316 * MSI-X should be the default for best performance,
317 * but this allows it to be forced off for testing.
318 */
319 static int ixgbe_enable_msix = 1;
320 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
321 "Enable MSI-X interrupts");
322
/*
 * Setting this to nonzero allows the use
 * of unsupported SFP+ modules; note that in
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = 0;
329 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
330 &allow_unsupported_sfp, 0,
331 "Allow unsupported SFP modules...use at your own risk");
332
333 /*
334 * Not sure if Flow Director is fully baked,
335 * so we'll default to turning it off.
336 */
337 static int ixgbe_enable_fdir = 0;
338 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
339 "Enable Flow Director");
340
341 /* Receive-Side Scaling */
342 static int ixgbe_enable_rss = 1;
343 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
344 "Enable Receive-Side Scaling (RSS)");
345
/*
 * AIM: Adaptive Interrupt Moderation,
 * which means that the interrupt rate is
 * varied over time based on the traffic
 * seen by that interrupt vector.
 */
static int ixgbe_enable_aim = 0;
353 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
354 "Enable adaptive interrupt moderation");
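
/*
 * Tunables above marked CTLFLAG_RDTUN are read once at boot and are
 * normally set from /boot/loader.conf, e.g.:
 *
 *   hw.ix.enable_msix=0
 *   hw.ix.flow_control=0
 *
 * CTLFLAG_RWTUN entries such as enable_aim can also be changed at
 * runtime: sysctl hw.ix.enable_aim=1
 */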
355
356 #if 0
357 /* Keep running tab on them for sanity check */
358 static int ixgbe_total_ports;
359 #endif
360
361 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
362
/*
 * For Flow Director: this is the number of TX packets between samples
 * taken for the filter pool; with the default of 20, every 20th packet
 * is probed.
 *
 * This feature can be disabled by setting this to 0.
 */
369 static int atr_sample_rate = 20;
370
371 extern struct if_txrx ixgbe_txrx;
372
373 static struct if_shared_ctx ixgbe_sctx_init = {
374 .isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
376 .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
377 .isc_tx_maxsegsize = PAGE_SIZE,
378 .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
379 .isc_tso_maxsegsize = PAGE_SIZE,
380 .isc_rx_maxsize = PAGE_SIZE*4,
381 .isc_rx_nsegments = 1,
382 .isc_rx_maxsegsize = PAGE_SIZE*4,
383 .isc_nfl = 1,
384 .isc_ntxqs = 1,
385 .isc_nrxqs = 1,
386
387 .isc_admin_intrcnt = 1,
388 .isc_vendor_info = ixgbe_vendor_info_array,
389 .isc_driver_version = ixgbe_driver_version,
390 .isc_driver = &ixgbe_if_driver,
391 .isc_flags = IFLIB_TSO_INIT_IP,
392
393 .isc_nrxd_min = {MIN_RXD},
394 .isc_ntxd_min = {MIN_TXD},
395 .isc_nrxd_max = {MAX_RXD},
396 .isc_ntxd_max = {MAX_TXD},
397 .isc_nrxd_default = {DEFAULT_RXD},
398 .isc_ntxd_default = {DEFAULT_TXD},
399 };
400
401 /************************************************************************
402 * ixgbe_if_tx_queues_alloc
403 ************************************************************************/
404 static int
ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
407 {
408 struct ixgbe_softc *sc = iflib_get_softc(ctx);
409 if_softc_ctx_t scctx = sc->shared;
410 struct ix_tx_queue *que;
411 int i, j, error;
412
413 MPASS(sc->num_tx_queues > 0);
414 MPASS(sc->num_tx_queues == ntxqsets);
415 MPASS(ntxqs == 1);
416
417 /* Allocate queue structure memory */
418 sc->tx_queues =
419 (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
420 M_IXGBE, M_NOWAIT | M_ZERO);
421 if (!sc->tx_queues) {
422 device_printf(iflib_get_dev(ctx),
423 "Unable to allocate TX ring memory\n");
424 return (ENOMEM);
425 }
426
427 for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
428 struct tx_ring *txr = &que->txr;
429
430 /* In case SR-IOV is enabled, align the index properly */
		txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
433
434 txr->sc = que->sc = sc;
435
436 /* Allocate report status array */
		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
		    scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
438 if (txr->tx_rsq == NULL) {
439 error = ENOMEM;
440 goto fail;
441 }
442 for (j = 0; j < scctx->isc_ntxd[0]; j++)
443 txr->tx_rsq[j] = QIDX_INVALID;
444 /* get the virtual and physical address of the hardware queues */
445 txr->tail = IXGBE_TDT(txr->me);
446 txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
447 txr->tx_paddr = paddrs[i];
448
449 txr->bytes = 0;
450 txr->total_packets = 0;
451
452 /* Set the rate at which we sample packets */
453 if (sc->feat_en & IXGBE_FEATURE_FDIR)
454 txr->atr_sample = atr_sample_rate;
	}
457
458 device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
459 sc->num_tx_queues);
460
461 return (0);
462
463 fail:
464 ixgbe_if_queues_free(ctx);
465
466 return (error);
467 } /* ixgbe_if_tx_queues_alloc */
468
469 /************************************************************************
470 * ixgbe_if_rx_queues_alloc
471 ************************************************************************/
472 static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
475 {
476 struct ixgbe_softc *sc = iflib_get_softc(ctx);
477 struct ix_rx_queue *que;
478 int i;
479
480 MPASS(sc->num_rx_queues > 0);
481 MPASS(sc->num_rx_queues == nrxqsets);
482 MPASS(nrxqs == 1);
483
484 /* Allocate queue structure memory */
485 sc->rx_queues =
	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
487 M_IXGBE, M_NOWAIT | M_ZERO);
488 if (!sc->rx_queues) {
489 device_printf(iflib_get_dev(ctx),
		    "Unable to allocate RX ring memory\n");
491 return (ENOMEM);
492 }
493
494 for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
495 struct rx_ring *rxr = &que->rxr;
496
497 /* In case SR-IOV is enabled, align the index properly */
		rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
500
501 rxr->sc = que->sc = sc;
502
503 /* get the virtual and physical address of the hw queues */
504 rxr->tail = IXGBE_RDT(rxr->me);
505 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
506 rxr->rx_paddr = paddrs[i];
507 rxr->bytes = 0;
508 rxr->que = que;
509 }
510
511 device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
512 sc->num_rx_queues);
513
514 return (0);
515 } /* ixgbe_if_rx_queues_alloc */
516
517 /************************************************************************
518 * ixgbe_if_queues_free
519 ************************************************************************/
520 static void
ixgbe_if_queues_free(if_ctx_t ctx)
522 {
523 struct ixgbe_softc *sc = iflib_get_softc(ctx);
524 struct ix_tx_queue *tx_que = sc->tx_queues;
525 struct ix_rx_queue *rx_que = sc->rx_queues;
526 int i;
527
528 if (tx_que != NULL) {
529 for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
530 struct tx_ring *txr = &tx_que->txr;
531 if (txr->tx_rsq == NULL)
532 break;
533
534 free(txr->tx_rsq, M_IXGBE);
535 txr->tx_rsq = NULL;
536 }
537
538 free(sc->tx_queues, M_IXGBE);
539 sc->tx_queues = NULL;
540 }
541 if (rx_que != NULL) {
542 free(sc->rx_queues, M_IXGBE);
543 sc->rx_queues = NULL;
544 }
545 } /* ixgbe_if_queues_free */
546
547 /************************************************************************
548 * ixgbe_initialize_rss_mapping
549 ************************************************************************/
550 static void
ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
552 {
553 struct ixgbe_hw *hw = &sc->hw;
554 u32 reta = 0, mrqc, rss_key[10];
555 int queue_id, table_size, index_mult;
556 int i, j;
557 u32 rss_hash_config;
558
559 if (sc->feat_en & IXGBE_FEATURE_RSS) {
560 /* Fetch the configured RSS key */
561 rss_getkey((uint8_t *)&rss_key);
562 } else {
563 /* set up random bits */
564 arc4rand(&rss_key, sizeof(rss_key), 0);
565 }
566
567 /* Set multiplier for RETA setup and table size based on MAC */
568 index_mult = 0x1;
569 table_size = 128;
570 switch (sc->hw.mac.type) {
571 case ixgbe_mac_82598EB:
572 index_mult = 0x11;
573 break;
574 case ixgbe_mac_X550:
575 case ixgbe_mac_X550EM_x:
576 case ixgbe_mac_X550EM_a:
577 table_size = 512;
578 break;
579 default:
580 break;
581 }
582
583 /* Set up the redirection table */
584 for (i = 0, j = 0; i < table_size; i++, j++) {
585 if (j == sc->num_rx_queues)
586 j = 0;
587
588 if (sc->feat_en & IXGBE_FEATURE_RSS) {
589 /*
590 * Fetch the RSS bucket id for the given indirection
591 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues).
593 */
594 queue_id = rss_get_indirection_to_bucket(i);
595 queue_id = queue_id % sc->num_rx_queues;
596 } else
597 queue_id = (j * index_mult);
598
599 /*
600 * The low 8 bits are for hash value (n+0);
601 * The next 8 bits are for hash value (n+1), etc.
602 */
603 reta = reta >> 8;
604 reta = reta | (((uint32_t)queue_id) << 24);
605 if ((i & 3) == 3) {
606 if (i < 128)
607 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
608 else
609 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
610 reta);
611 reta = 0;
612 }
613 }
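
	/*
	 * A worked example of the packing above: with queue ids 0,1,2,3
	 * assigned to entries n..n+3, reta is 0x03020100 after the
	 * fourth iteration, so entry n lands in bits 7:0 of RETA(n/4),
	 * entry n+1 in bits 15:8, and so on.
	 */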
614
615 /* Now fill our hash function seeds */
616 for (i = 0; i < 10; i++)
617 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
618
619 /* Perform hash on these packet types */
620 if (sc->feat_en & IXGBE_FEATURE_RSS)
621 rss_hash_config = rss_gethashconfig();
622 else {
623 /*
624 * Disable UDP - IP fragments aren't currently being handled
625 * and so we end up with a mix of 2-tuple and 4-tuple
626 * traffic.
627 */
628 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
629 | RSS_HASHTYPE_RSS_TCP_IPV4
630 | RSS_HASHTYPE_RSS_IPV6
631 | RSS_HASHTYPE_RSS_TCP_IPV6
632 | RSS_HASHTYPE_RSS_IPV6_EX
633 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
634 }
635
636 mrqc = IXGBE_MRQC_RSSEN;
637 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
638 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
639 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
640 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
641 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
642 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
643 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
644 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
645 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
646 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
647 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
648 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
649 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
650 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
651 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
652 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
653 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
654 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
655 mrqc |= ixgbe_get_mrqc(sc->iov_mode);
656 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
657 } /* ixgbe_initialize_rss_mapping */
658
659 /************************************************************************
660 * ixgbe_initialize_receive_units - Setup receive registers and features.
661 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT) - 1)
663
664 static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
666 {
667 struct ixgbe_softc *sc = iflib_get_softc(ctx);
668 if_softc_ctx_t scctx = sc->shared;
669 struct ixgbe_hw *hw = &sc->hw;
670 if_t ifp = iflib_get_ifp(ctx);
671 struct ix_rx_queue *que;
672 int i, j;
673 u32 bufsz, fctrl, srrctl, rxcsum;
674 u32 hlreg;
675
676 /*
677 * Make sure receives are disabled while
678 * setting up the descriptor ring
679 */
680 ixgbe_disable_rx(hw);
681
682 /* Enable broadcasts */
683 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
684 fctrl |= IXGBE_FCTRL_BAM;
685 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
686 fctrl |= IXGBE_FCTRL_DPF;
687 fctrl |= IXGBE_FCTRL_PMCF;
688 }
689 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
690
691 /* Set for Jumbo Frames? */
692 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
693 if (if_getmtu(ifp) > ETHERMTU)
694 hlreg |= IXGBE_HLREG0_JUMBOEN;
695 else
696 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
697 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
698
699 bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
700 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
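
	/*
	 * Example: with standard 2 KB clusters (rx_mbuf_sz == MCLBYTES
	 * == 2048) and the 1 KB-granular BSIZEPKT field (shift of 10),
	 * this works out to (2048 + 1023) >> 10 == 2, i.e. a 2 KB
	 * packet buffer per descriptor.
	 */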
701
702 /* Setup the Base and Length of the Rx Descriptor Ring */
703 for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
704 struct rx_ring *rxr = &que->rxr;
705 u64 rdba = rxr->rx_paddr;
706
707 j = rxr->me;
708
709 /* Setup the Base and Length of the Rx Descriptor Ring */
710 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
711 (rdba & 0x00000000ffffffffULL));
712 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
713 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
714 scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
715
716 /* Set up the SRRCTL register */
717 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
718 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
719 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
720 srrctl |= bufsz;
721 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
722
723 /*
724 * Set DROP_EN iff we have no flow control and >1 queue.
725 * Note that srrctl was cleared shortly before during reset,
726 * so we do not need to clear the bit, but do it just in case
727 * this code is moved elsewhere.
728 */
729 if (sc->num_rx_queues > 1 &&
730 sc->hw.fc.requested_mode == ixgbe_fc_none) {
731 srrctl |= IXGBE_SRRCTL_DROP_EN;
732 } else {
733 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
734 }
735
736 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
737
738 /* Setup the HW Rx Head and Tail Descriptor Pointers */
739 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
740 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
741
742 /* Set the driver rx tail address */
743 rxr->tail = IXGBE_RDT(rxr->me);
744 }
745
746 if (sc->hw.mac.type != ixgbe_mac_82598EB) {
747 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
748 | IXGBE_PSRTYPE_UDPHDR
749 | IXGBE_PSRTYPE_IPV4HDR
750 | IXGBE_PSRTYPE_IPV6HDR;
751 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
752 }
753
754 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
755
756 ixgbe_initialize_rss_mapping(sc);
757
758 if (sc->feat_en & IXGBE_FEATURE_RSS) {
759 /* RSS and RX IPP Checksum are mutually exclusive */
760 rxcsum |= IXGBE_RXCSUM_PCSD;
761 }
762
763 if (if_getcapenable(ifp) & IFCAP_RXCSUM)
764 rxcsum |= IXGBE_RXCSUM_PCSD;
765
766 /* This is useful for calculating UDP/IP fragment checksums */
767 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
768 rxcsum |= IXGBE_RXCSUM_IPPCSE;
769
770 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
771
772 } /* ixgbe_initialize_receive_units */
773
774 /************************************************************************
775 * ixgbe_initialize_transmit_units - Enable transmit units.
776 ************************************************************************/
777 static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
779 {
780 struct ixgbe_softc *sc = iflib_get_softc(ctx);
781 struct ixgbe_hw *hw = &sc->hw;
782 if_softc_ctx_t scctx = sc->shared;
783 struct ix_tx_queue *que;
784 int i;
785
786 /* Setup the Base and Length of the Tx Descriptor Ring */
787 for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
788 i++, que++) {
789 struct tx_ring *txr = &que->txr;
790 u64 tdba = txr->tx_paddr;
791 u32 txctrl = 0;
792 int j = txr->me;
793
794 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
795 (tdba & 0x00000000ffffffffULL));
796 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
797 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
798 scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
799
800 /* Setup the HW Tx Head and Tail descriptor pointers */
801 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
802 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
803
804 /* Cache the tail address */
805 txr->tail = IXGBE_TDT(txr->me);
806
807 txr->tx_rs_cidx = txr->tx_rs_pidx;
808 txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
809 for (int k = 0; k < scctx->isc_ntxd[0]; k++)
810 txr->tx_rsq[k] = QIDX_INVALID;
811
812 /* Disable Head Writeback */
813 /*
814 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
816 * fields remain the same.
817 */
818 switch (hw->mac.type) {
819 case ixgbe_mac_82598EB:
820 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
821 break;
822 default:
823 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
824 break;
825 }
826 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
827 switch (hw->mac.type) {
828 case ixgbe_mac_82598EB:
829 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
830 break;
831 default:
832 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
833 break;
834 }
835
836 }
837
838 if (hw->mac.type != ixgbe_mac_82598EB) {
839 u32 dmatxctl, rttdcs;
840
841 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
842 dmatxctl |= IXGBE_DMATXCTL_TE;
843 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
844 /* Disable arbiter to set MTQC */
845 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
846 rttdcs |= IXGBE_RTTDCS_ARBDIS;
847 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
848 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
849 ixgbe_get_mtqc(sc->iov_mode));
850 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
851 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
852 }
853
854 } /* ixgbe_initialize_transmit_units */
855
856 /************************************************************************
857 * ixgbe_register
858 ************************************************************************/
859 static void *
ixgbe_register(device_t dev)
861 {
862 return (&ixgbe_sctx_init);
863 } /* ixgbe_register */
864
865 /************************************************************************
866 * ixgbe_if_attach_pre - Device initialization routine, part 1
867 *
868 * Called when the driver is being loaded.
869 * Identifies the type of hardware, initializes the hardware,
870 * and initializes iflib structures.
871 *
872 * return 0 on success, positive on failure
873 ************************************************************************/
874 static int
ixgbe_if_attach_pre(if_ctx_t ctx)
876 {
877 struct ixgbe_softc *sc;
878 device_t dev;
879 if_softc_ctx_t scctx;
880 struct ixgbe_hw *hw;
881 int error = 0;
882 u32 ctrl_ext;
883 size_t i;
884
885 INIT_DEBUGOUT("ixgbe_attach: begin");
886
887 /* Allocate, clear, and link in our adapter structure */
888 dev = iflib_get_dev(ctx);
889 sc = iflib_get_softc(ctx);
890 sc->hw.back = sc;
891 sc->ctx = ctx;
892 sc->dev = dev;
893 scctx = sc->shared = iflib_get_softc_ctx(ctx);
894 sc->media = iflib_get_media(ctx);
895 hw = &sc->hw;
896
897 /* Determine hardware revision */
898 hw->vendor_id = pci_get_vendor(dev);
899 hw->device_id = pci_get_device(dev);
900 hw->revision_id = pci_get_revid(dev);
901 hw->subsystem_vendor_id = pci_get_subvendor(dev);
902 hw->subsystem_device_id = pci_get_subdevice(dev);
903
904 /* Do base PCI setup - map BAR0 */
905 if (ixgbe_allocate_pci_resources(ctx)) {
906 device_printf(dev, "Allocation of PCI resources failed\n");
907 return (ENXIO);
908 }
909
910 /* let hardware know driver is loaded */
911 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
912 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
913 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
914
915 /*
916 * Initialize the shared code
917 */
918 if (ixgbe_init_shared_code(hw) != 0) {
919 device_printf(dev, "Unable to initialize the shared code\n");
920 error = ENXIO;
921 goto err_pci;
922 }
923
924 if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
925 device_printf(dev, "Firmware recovery mode detected. Limiting "
926 "functionality.\nRefer to the Intel(R) Ethernet Adapters "
927 "and Devices User Guide for details on firmware recovery "
928 "mode.");
929 error = ENOSYS;
930 goto err_pci;
931 }
932
	/* 82598 does not support SR-IOV; initialize everything else */
934 if (hw->mac.type >= ixgbe_mac_82599_vf) {
935 for (i = 0; i < sc->num_vfs; i++)
936 hw->mbx.ops[i].init_params(hw);
937 }
938
939 hw->allow_unsupported_sfp = allow_unsupported_sfp;
940
941 if (hw->mac.type != ixgbe_mac_82598EB)
942 hw->phy.smart_speed = ixgbe_smart_speed;
943
944 ixgbe_init_device_features(sc);
945
946 /* Enable WoL (if supported) */
947 ixgbe_check_wol_support(sc);
948
949 /* Verify adapter fan is still functional (if applicable) */
950 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
951 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
952 ixgbe_check_fan_failure(sc, esdp, false);
953 }
954
955 /* Ensure SW/FW semaphore is free */
956 ixgbe_init_swfw_semaphore(hw);
957
958 /* Set an initial default flow control value */
959 hw->fc.requested_mode = ixgbe_flow_control;
960
961 hw->phy.reset_if_overtemp = true;
962 error = ixgbe_reset_hw(hw);
963 hw->phy.reset_if_overtemp = false;
964 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
965 /*
966 * No optics in this port, set up
967 * so the timer routine will probe
968 * for later insertion.
969 */
970 sc->sfp_probe = true;
971 error = 0;
972 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
973 device_printf(dev, "Unsupported SFP+ module detected!\n");
974 error = EIO;
975 goto err_pci;
976 } else if (error) {
977 device_printf(dev, "Hardware initialization failed\n");
978 error = EIO;
979 goto err_pci;
980 }
981
982 /* Make sure we have a good EEPROM before we read from it */
983 if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
984 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
985 error = EIO;
986 goto err_pci;
987 }
988
989 error = ixgbe_start_hw(hw);
990 switch (error) {
991 case IXGBE_ERR_EEPROM_VERSION:
		device_printf(dev,
		    "This device is a pre-production adapter/LOM. Please be"
		    " aware there may be issues associated with your"
		    " hardware.\nIf you are experiencing problems please"
		    " contact your Intel or hardware representative who"
		    " provided you with this hardware.\n");
993 break;
994 case IXGBE_ERR_SFP_NOT_SUPPORTED:
995 device_printf(dev, "Unsupported SFP+ Module\n");
996 error = EIO;
997 goto err_pci;
998 case IXGBE_ERR_SFP_NOT_PRESENT:
999 device_printf(dev, "No SFP+ Module found\n");
1000 /* falls thru */
1001 default:
1002 break;
1003 }
1004
1005 /* Most of the iflib initialization... */
1006
1007 iflib_set_mac(ctx, hw->mac.addr);
1008 switch (sc->hw.mac.type) {
1009 case ixgbe_mac_X550:
1010 case ixgbe_mac_X550EM_x:
1011 case ixgbe_mac_X550EM_a:
1012 scctx->isc_rss_table_size = 512;
1013 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
1014 break;
1015 default:
1016 scctx->isc_rss_table_size = 128;
1017 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
1018 }
1019
1020 /* Allow legacy interrupts */
1021 ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1022
1023 scctx->isc_txqsizes[0] =
1024 roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
	    sizeof(u32), DBA_ALIGN);
1026 scctx->isc_rxqsizes[0] =
1027 roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1028 DBA_ALIGN);
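
	/*
	 * Sketch of the arithmetic above, assuming 1024 descriptors and
	 * the usual DBA_ALIGN of 128: a TX ring takes
	 * roundup2(1024 * 16 + 4, 128) == 16512 bytes and an RX ring
	 * roundup2(1024 * 16, 128) == 16384 bytes, the advanced
	 * descriptors being 16 bytes each.
	 */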
1029
1030 /* XXX */
1031 scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1032 CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1033 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
1034 scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1035 } else {
		scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
1037 scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1038 }
1039
1040 scctx->isc_msix_bar = pci_msix_table_bar(dev);
1041
1042 scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1043 scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1044 scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1045
1046 scctx->isc_txrx = &ixgbe_txrx;
1047
1048 scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
1049
1050 return (0);
1051
1052 err_pci:
1053 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
1054 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1055 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
1056 ixgbe_free_pci_resources(ctx);
1057
1058 return (error);
1059 } /* ixgbe_if_attach_pre */
1060
1061 /*********************************************************************
1062 * ixgbe_if_attach_post - Device initialization routine, part 2
1063 *
1064 * Called during driver load, but after interrupts and
1065 * resources have been allocated and configured.
1066 * Sets up some data structures not relevant to iflib.
1067 *
1068 * return 0 on success, positive on failure
1069 *********************************************************************/
1070 static int
ixgbe_if_attach_post(if_ctx_t ctx)
1072 {
1073 device_t dev;
1074 struct ixgbe_softc *sc;
1075 struct ixgbe_hw *hw;
1076 int error = 0;
1077
1078 dev = iflib_get_dev(ctx);
1079 sc = iflib_get_softc(ctx);
	hw = &sc->hw;

1083 if (sc->intr_type == IFLIB_INTR_LEGACY &&
1084 (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1085 device_printf(dev, "Device does not support legacy interrupts");
1086 error = ENXIO;
1087 goto err;
1088 }
1089
1090 /* Allocate multicast array memory. */
1091 sc->mta = malloc(sizeof(*sc->mta) *
1092 MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1093 if (sc->mta == NULL) {
1094 device_printf(dev, "Can not allocate multicast setup array\n");
1095 error = ENOMEM;
1096 goto err;
1097 }
1098
1099 /* hw.ix defaults init */
1100 ixgbe_set_advertise(sc, ixgbe_advertise_speed);
1101
1102 /* Enable the optics for 82599 SFP+ fiber */
1103 ixgbe_enable_tx_laser(hw);
1104
1105 /* Enable power to the phy. */
1106 ixgbe_set_phy_power(hw, true);
1107
1108 ixgbe_initialize_iov(sc);
1109
1110 error = ixgbe_setup_interface(ctx);
1111 if (error) {
1112 device_printf(dev, "Interface setup failed: %d\n", error);
1113 goto err;
1114 }
1115
1116 ixgbe_if_update_admin_status(ctx);
1117
1118 /* Initialize statistics */
1119 ixgbe_update_stats_counters(sc);
1120 ixgbe_add_hw_stats(sc);
1121
1122 /* Check PCIE slot type/speed/width */
1123 ixgbe_get_slot_info(sc);
1124
1125 /*
1126 * Do time init and sysctl init here, but
1127 * only on the first port of a bypass sc.
1128 */
1129 ixgbe_bypass_init(sc);
1130
1131 /* Display NVM and Option ROM versions */
1132 ixgbe_print_fw_version(ctx);
1133
1134 /* Set an initial dmac value */
1135 sc->dmac = 0;
1136 /* Set initial advertised speeds (if applicable) */
1137 sc->advertise = ixgbe_get_default_advertise(sc);
1138
1139 if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
1140 ixgbe_define_iov_schemas(dev, &error);
1141
1142 /* Add sysctls */
1143 ixgbe_add_device_sysctls(ctx);
1144
1145 /* Init recovery mode timer and state variable */
1146 if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
1147 sc->recovery_mode = 0;
1148
1149 /* Set up the timer callout */
1150 callout_init(&sc->fw_mode_timer, true);
1151
1152 /* Start the task */
1153 callout_reset(&sc->fw_mode_timer, hz, ixgbe_fw_mode_timer, sc);
1154 }
1155
1156 return (0);
1157 err:
1158 return (error);
1159 } /* ixgbe_if_attach_post */
1160
1161 /************************************************************************
1162 * ixgbe_check_wol_support
1163 *
1164 * Checks whether the adapter's ports are capable of
1165 * Wake On LAN by reading the adapter's NVM.
1166 *
1167 * Sets each port's hw->wol_enabled value depending
1168 * on the value read here.
1169 ************************************************************************/
1170 static void
ixgbe_check_wol_support(struct ixgbe_softc *sc)
1172 {
1173 struct ixgbe_hw *hw = &sc->hw;
1174 u16 dev_caps = 0;
1175
1176 /* Find out WoL support for port */
1177 sc->wol_support = hw->wol_enabled = 0;
1178 ixgbe_get_device_caps(hw, &dev_caps);
1179 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1180 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1181 hw->bus.func == 0))
1182 sc->wol_support = hw->wol_enabled = 1;
1183
1184 /* Save initial wake up filter configuration */
1185 sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1186
1187 return;
1188 } /* ixgbe_check_wol_support */
1189
1190 /************************************************************************
1191 * ixgbe_setup_interface
1192 *
1193 * Setup networking device structure and register an interface.
1194 ************************************************************************/
1195 static int
ixgbe_setup_interface(if_ctx_t ctx)
1197 {
1198 if_t ifp = iflib_get_ifp(ctx);
1199 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1200
1201 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1202
1203 if_setbaudrate(ifp, IF_Gbps(10));
1204
1205 sc->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
1206
1207 sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);
1208
1209 ixgbe_add_media_types(ctx);
1210
1211 /* Autoselect media by default */
1212 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
1213
1214 return (0);
1215 } /* ixgbe_setup_interface */
1216
1217 /************************************************************************
1218 * ixgbe_if_get_counter
1219 ************************************************************************/
1220 static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1222 {
1223 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1224 if_t ifp = iflib_get_ifp(ctx);
1225
1226 switch (cnt) {
1227 case IFCOUNTER_IPACKETS:
1228 return (sc->ipackets);
1229 case IFCOUNTER_OPACKETS:
1230 return (sc->opackets);
1231 case IFCOUNTER_IBYTES:
1232 return (sc->ibytes);
1233 case IFCOUNTER_OBYTES:
1234 return (sc->obytes);
1235 case IFCOUNTER_IMCASTS:
1236 return (sc->imcasts);
1237 case IFCOUNTER_OMCASTS:
1238 return (sc->omcasts);
1239 case IFCOUNTER_COLLISIONS:
1240 return (0);
1241 case IFCOUNTER_IQDROPS:
1242 return (sc->iqdrops);
1243 case IFCOUNTER_OQDROPS:
1244 return (0);
1245 case IFCOUNTER_IERRORS:
1246 return (sc->ierrors);
1247 default:
1248 return (if_get_counter_default(ifp, cnt));
1249 }
1250 } /* ixgbe_if_get_counter */
1251
1252 /************************************************************************
1253 * ixgbe_if_i2c_req
1254 ************************************************************************/
1255 static int
ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1257 {
1258 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1259 struct ixgbe_hw *hw = &sc->hw;
	int i;

1263 if (hw->phy.ops.read_i2c_byte == NULL)
1264 return (ENXIO);
1265 for (i = 0; i < req->len; i++)
1266 hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1267 req->dev_addr, &req->data[i]);
1268 return (0);
1269 } /* ixgbe_if_i2c_req */
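
/*
 * A usage note rather than driver code: this method backs the SIOCGI2C
 * ioctl, which is how e.g. "ifconfig -v ix0" reads SFP+ module EEPROM
 * and diagnostic pages through iflib.
 */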
1270
1271 /* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1272 * @ctx: iflib context
1273 * @event: event code to check
1274 *
1275 * Defaults to returning false for unknown events.
1276 *
1277 * @returns true if iflib needs to reinit the interface
1278 */
1279 static bool
ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1281 {
1282 switch (event) {
1283 case IFLIB_RESTART_VLAN_CONFIG:
1284 default:
1285 return (false);
1286 }
1287 }
1288
1289 /************************************************************************
1290 * ixgbe_add_media_types
1291 ************************************************************************/
1292 static void
ixgbe_add_media_types(if_ctx_t ctx)
1294 {
1295 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1296 struct ixgbe_hw *hw = &sc->hw;
1297 device_t dev = iflib_get_dev(ctx);
1298 u64 layer;
1299
1300 layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
1301
1302 /* Media types with matching FreeBSD media defines */
1303 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1304 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1305 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1306 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1307 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1308 ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1309 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1310 ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
1311
1312 if (hw->mac.type == ixgbe_mac_X550) {
1313 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
1314 ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
1315 }
1316
1317 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1318 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1319 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1320 NULL);
1321
1322 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1323 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1324 if (hw->phy.multispeed_fiber)
1325 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0,
1326 NULL);
1327 }
1328 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1329 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1330 if (hw->phy.multispeed_fiber)
1331 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0,
1332 NULL);
1333 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1334 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1335 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1336 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1337
1338 #ifdef IFM_ETH_XTYPE
1339 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1340 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1341 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1343 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1344 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1345 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1346 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1347 #else
1348 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1349 device_printf(dev, "Media supported: 10GbaseKR\n");
1350 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1351 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1352 }
1353 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1354 device_printf(dev, "Media supported: 10GbaseKX4\n");
1355 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1356 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1357 }
1358 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1359 device_printf(dev, "Media supported: 1000baseKX\n");
1360 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1361 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1362 }
1363 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1364 device_printf(dev, "Media supported: 2500baseKX\n");
1365 device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1366 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1367 }
1368 #endif
1369 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1370 device_printf(dev, "Media supported: 1000baseBX\n");
1371
1372 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1373 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1374 0, NULL);
1375 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1376 }
1377
1378 ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1379 } /* ixgbe_add_media_types */
1380
1381 /************************************************************************
1382 * ixgbe_is_sfp
1383 ************************************************************************/
1384 static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
1386 {
1387 switch (hw->mac.type) {
1388 case ixgbe_mac_82598EB:
1389 if (hw->phy.type == ixgbe_phy_nl)
1390 return (true);
1391 return (false);
1392 case ixgbe_mac_82599EB:
1393 switch (hw->mac.ops.get_media_type(hw)) {
1394 case ixgbe_media_type_fiber:
1395 case ixgbe_media_type_fiber_qsfp:
1396 return (true);
1397 default:
1398 return (false);
1399 }
1400 case ixgbe_mac_X550EM_x:
1401 case ixgbe_mac_X550EM_a:
1402 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1403 return (true);
1404 return (false);
1405 default:
1406 return (false);
1407 }
1408 } /* ixgbe_is_sfp */
1409
1410 /************************************************************************
1411 * ixgbe_config_link
1412 ************************************************************************/
1413 static void
ixgbe_config_link(if_ctx_t ctx)
1415 {
1416 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1417 struct ixgbe_hw *hw = &sc->hw;
1418 u32 autoneg, err = 0;
1419 bool sfp, negotiate;
1420
1421 sfp = ixgbe_is_sfp(hw);
1422
1423 if (sfp) {
1424 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
1425 iflib_admin_intr_deferred(ctx);
1426 } else {
1427 if (hw->mac.ops.check_link)
1428 err = ixgbe_check_link(hw, &sc->link_speed,
1429 &sc->link_up, false);
1430 if (err)
1431 return;
1432 autoneg = hw->phy.autoneg_advertised;
1433 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1434 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1435 &negotiate);
1436 if (err)
1437 return;
1438
1439 if (hw->mac.type == ixgbe_mac_X550 &&
1440 hw->phy.autoneg_advertised == 0) {
1441 /*
1442 * 2.5G and 5G autonegotiation speeds on X550
1443 * are disabled by default due to reported
1444 * interoperability issues with some switches.
1445 *
1446 * The second condition checks if any operations
1447 * involving setting autonegotiation speeds have
1448 * been performed prior to this ixgbe_config_link()
1449 * call.
1450 *
1451 * If hw->phy.autoneg_advertised does not
1452 * equal 0, this means that the user might have
1453 * set autonegotiation speeds via the sysctl
1454 * before bringing the interface up. In this
1455 * case, we should not disable 2.5G and 5G
			 * since those speeds might be selected by the
1457 * user.
1458 *
1459 * Otherwise (i.e. if hw->phy.autoneg_advertised
1460 * is set to 0), it is the first time we set
1461 * autonegotiation preferences and the default
1462 * set of speeds should exclude 2.5G and 5G.
1463 */
1464 autoneg &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
1465 IXGBE_LINK_SPEED_5GB_FULL);
1466 }
1467
1468 if (hw->mac.ops.setup_link)
1469 err = hw->mac.ops.setup_link(hw, autoneg,
1470 sc->link_up);
1471 }
1472 } /* ixgbe_config_link */
1473
1474 /************************************************************************
1475 * ixgbe_update_stats_counters - Update board statistics counters.
1476 ************************************************************************/
1477 static void
ixgbe_update_stats_counters(struct ixgbe_softc *sc)
1479 {
1480 struct ixgbe_hw *hw = &sc->hw;
1481 struct ixgbe_hw_stats *stats = &sc->stats.pf;
1482 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1483 u32 lxoffrxc;
1484 u64 total_missed_rx = 0;
1485
1486 stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1487 stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1488 stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1489 stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1490 stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1491
1492 for (int i = 0; i < 16; i++) {
1493 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1494 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1495 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1496 }
1497 stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1498 stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1499 stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1500
1501 /* Hardware workaround, gprc counts missed packets */
1502 stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1503 stats->gprc -= missed_rx;
1504
1505 if (hw->mac.type != ixgbe_mac_82598EB) {
1506 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1507 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1508 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1509 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1510 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1511 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1512 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1513 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1514 stats->lxoffrxc += lxoffrxc;
1515 } else {
1516 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1517 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1518 stats->lxoffrxc += lxoffrxc;
1519 /* 82598 only has a counter in the high register */
1520 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1521 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1522 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1523 }
1524
1525 /*
1526 * For watchdog management we need to know if we have been paused
1527 * during the last interval, so capture that here.
1528 */
1529 if (lxoffrxc)
1530 sc->shared->isc_pause_frames = 1;
1531
1532 /*
1533 * Workaround: mprc hardware is incorrectly counting
1534 * broadcasts, so for now we subtract those.
1535 */
1536 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1537 stats->bprc += bprc;
1538 stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1539 if (hw->mac.type == ixgbe_mac_82598EB)
1540 stats->mprc -= bprc;
1541
1542 stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1543 stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1544 stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1545 stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1546 stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1547 stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1548
1549 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1550 stats->lxontxc += lxon;
1551 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1552 stats->lxofftxc += lxoff;
1553 total = lxon + lxoff;
1554
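	/*
	 * The MAC counts transmitted pause frames as good 64-byte
	 * multicast packets, so back them (and their minimum-length
	 * octets) out of the transmit totals below.
	 */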
1555 stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1556 stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1557 stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1558 stats->gptc -= total;
1559 stats->mptc -= total;
1560 stats->ptc64 -= total;
1561 stats->gotc -= total * ETHER_MIN_LEN;
1562
1563 stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1564 stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1565 stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1566 stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1567 stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1568 stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1569 stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1570 stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1571 stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1572 stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1573 stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1574 stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1575 stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1576 stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1577 stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1578 stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1579 stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1580 stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1581 /* Only read FCOE on 82599 */
1582 if (hw->mac.type != ixgbe_mac_82598EB) {
1583 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1584 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1585 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1586 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1587 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1588 }
1589
1590 /* Fill out the OS statistics structure */
1591 IXGBE_SET_IPACKETS(sc, stats->gprc);
1592 IXGBE_SET_OPACKETS(sc, stats->gptc);
1593 IXGBE_SET_IBYTES(sc, stats->gorc);
1594 IXGBE_SET_OBYTES(sc, stats->gotc);
1595 IXGBE_SET_IMCASTS(sc, stats->mprc);
1596 IXGBE_SET_OMCASTS(sc, stats->mptc);
1597 IXGBE_SET_COLLISIONS(sc, 0);
1598 IXGBE_SET_IQDROPS(sc, total_missed_rx);
1599
1600 /*
1601 	 * Aggregate the following error counters as RX errors:
1602 * - CRC error count,
1603 * - illegal byte error count,
1604 * - missed packets count,
1605 * - length error count,
1606 * - undersized packets count,
1607 * - fragmented packets count,
1608 * - oversized packets count,
1609 * - jabber count.
1610 */
1611 IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc +
1612 stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc +
1613 stats->rjc);
1614 } /* ixgbe_update_stats_counters */
1615
1616 /************************************************************************
1617 * ixgbe_add_hw_stats
1618 *
1619 * Add sysctl variables, one per statistic, to the system.
1620 ************************************************************************/
1621 static void
1622 ixgbe_add_hw_stats(struct ixgbe_softc *sc)
1623 {
1624 device_t dev = iflib_get_dev(sc->ctx);
1625 struct ix_rx_queue *rx_que;
1626 struct ix_tx_queue *tx_que;
1627 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1628 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1629 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1630 struct ixgbe_hw_stats *stats = &sc->stats.pf;
1631 struct sysctl_oid *stat_node, *queue_node;
1632 struct sysctl_oid_list *stat_list, *queue_list;
1633 int i;
1634
1635 #define QUEUE_NAME_LEN 32
1636 char namebuf[QUEUE_NAME_LEN];
1637
1638 /* Driver Statistics */
1639 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1640 CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets");
1641 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1642 CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
1643 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1644 CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");
1645
1646 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) {
1647 struct tx_ring *txr = &tx_que->txr;
1648 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1649 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1650 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1651 queue_list = SYSCTL_CHILDREN(queue_node);
1652
1653 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1654 CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
1655 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1656 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1657 CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
1658 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1659 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1660 CTLFLAG_RD, &txr->tso_tx, "TSO");
1661 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1662 CTLFLAG_RD, &txr->total_packets,
1663 "Queue Packets Transmitted");
1664 }
1665
1666 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) {
1667 struct rx_ring *rxr = &rx_que->rxr;
1668 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1669 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1670 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1671 queue_list = SYSCTL_CHILDREN(queue_node);
1672
1673 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1674 CTLTYPE_UINT | CTLFLAG_RW,
1675 &sc->rx_queues[i], 0,
1676 ixgbe_sysctl_interrupt_rate_handler, "IU",
1677 "Interrupt Rate");
1678 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1679 CTLFLAG_RD, &(sc->rx_queues[i].irqs),
1680 "irqs on this queue");
1681 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1682 CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
1683 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1684 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1685 CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
1686 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1687 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1688 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1689 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1690 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1691 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1692 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1693 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1694 CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1695 }
1696
1697 /* MAC stats get their own sub node */
1698
1699 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1700 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
1701 stat_list = SYSCTL_CHILDREN(stat_node);
1702
1703 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
1704 CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
1705 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1706 CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1707 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1708 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1709 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1710 CTLFLAG_RD, &stats->errbc, "Byte Errors");
1711 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1712 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1713 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1714 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1715 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1716 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1717 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1718 CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1719 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1720 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1721
1722 /* Flow Control stats */
1723 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1724 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1725 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1726 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1727 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1728 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1729 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1730 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1731
1732 /* Packet Reception Stats */
1733 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1734 CTLFLAG_RD, &stats->tor, "Total Octets Received");
1735 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1736 CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1737 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1738 CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1739 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1740 CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1741 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1742 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1743 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1744 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1745 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1746 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received");
1747 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1748 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1749 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1750 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1751 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1752 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1753 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1754 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1755 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1756 	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1757 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1758 CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1759 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1760 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
1761 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1762 CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1763 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1764 CTLFLAG_RD, &stats->rjc, "Received Jabber");
1765 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1766 CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1767 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1768 	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1769 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1770 CTLFLAG_RD, &stats->xec, "Checksum Errors");
1771
1772 /* Packet Transmission Stats */
1773 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1774 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1775 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1776 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1777 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1778 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1779 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1780 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1781 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1782 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1783 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1784 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1785 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1786 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
1787 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1788 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1789 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1790 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1791 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1792 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1793 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1794 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1795 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1796 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1797 } /* ixgbe_add_hw_stats */
1798
1799 /************************************************************************
1800 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1801 *
1802 * Retrieves the TDH value from the hardware
1803 ************************************************************************/
1804 static int
1805 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1806 {
1807 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1808 int error;
1809 unsigned int val;
1810
1811 if (!txr)
1812 return (0);
1813
1814
1815 if (atomic_load_acq_int(&txr->sc->recovery_mode))
1816 return (EPERM);
1817
1818 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
1819 error = sysctl_handle_int(oidp, &val, 0, req);
1820 if (error || !req->newptr)
1821 return error;
1822
1823 return (0);
1824 } /* ixgbe_sysctl_tdh_handler */
1825
1826 /************************************************************************
1827 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1828 *
1829 * Retrieves the TDT value from the hardware
1830 ************************************************************************/
1831 static int
1832 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1833 {
1834 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1835 int error;
1836 unsigned int val;
1837
1838 if (!txr)
1839 return (0);
1840
1841 if (atomic_load_acq_int(&txr->sc->recovery_mode))
1842 return (EPERM);
1843
1844 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me));
1845 error = sysctl_handle_int(oidp, &val, 0, req);
1846 if (error || !req->newptr)
1847 return error;
1848
1849 return (0);
1850 } /* ixgbe_sysctl_tdt_handler */
1851
1852 /************************************************************************
1853 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1854 *
1855 * Retrieves the RDH value from the hardware
1856 ************************************************************************/
1857 static int
1858 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1859 {
1860 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1861 int error;
1862 unsigned int val;
1863
1864 if (!rxr)
1865 return (0);
1866
1867 if (atomic_load_acq_int(&rxr->sc->recovery_mode))
1868 return (EPERM);
1869
1870 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me));
1871 error = sysctl_handle_int(oidp, &val, 0, req);
1872 if (error || !req->newptr)
1873 return error;
1874
1875 return (0);
1876 } /* ixgbe_sysctl_rdh_handler */
1877
1878 /************************************************************************
1879 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1880 *
1881 * Retrieves the RDT value from the hardware
1882 ************************************************************************/
1883 static int
1884 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1885 {
1886 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1887 int error;
1888 unsigned int val;
1889
1890 if (!rxr)
1891 return (0);
1892
1893 if (atomic_load_acq_int(&rxr->sc->recovery_mode))
1894 return (EPERM);
1895
1896 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me));
1897 error = sysctl_handle_int(oidp, &val, 0, req);
1898 if (error || !req->newptr)
1899 return error;
1900
1901 return (0);
1902 } /* ixgbe_sysctl_rdt_handler */
1903
1904 /************************************************************************
1905 * ixgbe_if_vlan_register
1906 *
1907 * Run via vlan config EVENT, it enables us to use the
1908 * HW Filter table since we can get the vlan id. This
1909 * just creates the entry in the soft version of the
1910 * VFTA, init will repopulate the real table.
1911 ************************************************************************/
1912 static void
1913 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1914 {
1915 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1916 u16 index, bit;
1917
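	/* The VFTA is a 4096-bit table held as 128 32-bit words: bits
	 * 0-4 of the tag select the bit, bits 5-11 select the word. */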
1918 index = (vtag >> 5) & 0x7F;
1919 bit = vtag & 0x1F;
1920 sc->shadow_vfta[index] |= (1 << bit);
1921 ++sc->num_vlans;
1922 ixgbe_setup_vlan_hw_support(ctx);
1923 } /* ixgbe_if_vlan_register */
1924
1925 /************************************************************************
1926 * ixgbe_if_vlan_unregister
1927 *
1928 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1929 ************************************************************************/
1930 static void
1931 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1932 {
1933 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1934 u16 index, bit;
1935
1936 index = (vtag >> 5) & 0x7F;
1937 bit = vtag & 0x1F;
1938 sc->shadow_vfta[index] &= ~(1 << bit);
1939 --sc->num_vlans;
1940 /* Re-init to load the changes */
1941 ixgbe_setup_vlan_hw_support(ctx);
1942 } /* ixgbe_if_vlan_unregister */
1943
1944 /************************************************************************
1945 * ixgbe_setup_vlan_hw_support
1946 ************************************************************************/
1947 static void
1948 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1949 {
1950 if_t ifp = iflib_get_ifp(ctx);
1951 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1952 struct ixgbe_hw *hw = &sc->hw;
1953 struct rx_ring *rxr;
1954 int i;
1955 u32 ctrl;
1956
1957
1958 /*
1959 * We get here thru init_locked, meaning
1960 * a soft reset, this has already cleared
1961 * the VFTA and other state, so if there
1962 	 * have been no vlans registered, do nothing.
1963 */
1964 if (sc->num_vlans == 0 || (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) {
1965 /* Clear the vlan hw flag */
1966 for (i = 0; i < sc->num_rx_queues; i++) {
1967 rxr = &sc->rx_queues[i].rxr;
1968 /* On 82599 the VLAN enable is per/queue in RXDCTL */
1969 if (hw->mac.type != ixgbe_mac_82598EB) {
1970 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1971 ctrl &= ~IXGBE_RXDCTL_VME;
1972 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1973 }
1974 rxr->vtag_strip = false;
1975 }
1976 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1977 		/* Disable the VLAN filter table */
1978 ctrl |= IXGBE_VLNCTRL_CFIEN;
1979 ctrl &= ~IXGBE_VLNCTRL_VFE;
1980 if (hw->mac.type == ixgbe_mac_82598EB)
1981 ctrl &= ~IXGBE_VLNCTRL_VME;
1982 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1983 return;
1984 }
1985
1986 /* Setup the queues for vlans */
1987 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
1988 for (i = 0; i < sc->num_rx_queues; i++) {
1989 rxr = &sc->rx_queues[i].rxr;
1990 /* On 82599 the VLAN enable is per/queue in RXDCTL */
1991 if (hw->mac.type != ixgbe_mac_82598EB) {
1992 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1993 ctrl |= IXGBE_RXDCTL_VME;
1994 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1995 }
1996 rxr->vtag_strip = true;
1997 }
1998 }
1999
2000 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
2001 return;
2002 /*
2003 	 * A soft reset zeroes out the VFTA, so
2004 * we need to repopulate it now.
2005 */
2006 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2007 if (sc->shadow_vfta[i] != 0)
2008 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
2009 sc->shadow_vfta[i]);
2010
2011 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2012 /* Enable the Filter Table if enabled */
2013 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) {
2014 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2015 ctrl |= IXGBE_VLNCTRL_VFE;
2016 }
2017 if (hw->mac.type == ixgbe_mac_82598EB)
2018 ctrl |= IXGBE_VLNCTRL_VME;
2019 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2020 } /* ixgbe_setup_vlan_hw_support */
2021
2022 /************************************************************************
2023 * ixgbe_get_slot_info
2024 *
2025 * Get the width and transaction speed of
2026 * the slot this adapter is plugged into.
2027 ************************************************************************/
2028 static void
2029 ixgbe_get_slot_info(struct ixgbe_softc *sc)
2030 {
2031 device_t dev = iflib_get_dev(sc->ctx);
2032 struct ixgbe_hw *hw = &sc->hw;
2033 int bus_info_valid = true;
2034 u32 offset;
2035 u16 link;
2036
2037 /* Some devices are behind an internal bridge */
2038 switch (hw->device_id) {
2039 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2040 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2041 goto get_parent_info;
2042 default:
2043 break;
2044 }
2045
2046 ixgbe_get_bus_info(hw);
2047
2048 /*
2049 * Some devices don't use PCI-E, but there is no need
2050 * to display "Unknown" for bus speed and width.
2051 */
2052 switch (hw->mac.type) {
2053 case ixgbe_mac_X550EM_x:
2054 case ixgbe_mac_X550EM_a:
2055 return;
2056 default:
2057 goto display;
2058 }
2059
2060 get_parent_info:
2061 /*
2062 * For the Quad port adapter we need to parse back
2063 * up the PCI tree to find the speed of the expansion
2064 * slot into which this adapter is plugged. A bit more work.
2065 */
2066 dev = device_get_parent(device_get_parent(dev));
2067 #ifdef IXGBE_DEBUG
2068 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2069 pci_get_slot(dev), pci_get_function(dev));
2070 #endif
2071 dev = device_get_parent(device_get_parent(dev));
2072 #ifdef IXGBE_DEBUG
2073 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2074 pci_get_slot(dev), pci_get_function(dev));
2075 #endif
2076 /* Now get the PCI Express Capabilities offset */
2077 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
2078 /*
2079 * Hmm...can't get PCI-Express capabilities.
2080 * Falling back to default method.
2081 */
2082 bus_info_valid = false;
2083 ixgbe_get_bus_info(hw);
2084 goto display;
2085 }
2086 /* ...and read the Link Status Register */
2087 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
2088 ixgbe_set_pci_config_data_generic(hw, link);
2089
2090 display:
2091 device_printf(dev, "PCI Express Bus: Speed %s %s\n",
2092 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2093 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2094 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2095 "Unknown"),
2096 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
2097 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
2098 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
2099 "Unknown"));
2100
2101 if (bus_info_valid) {
2102 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2103 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2104 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2105 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
2106 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
2107 }
2108 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2109 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2110 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2111 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
2112 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
2113 }
2114 } else
2115 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2116
2117 return;
2118 } /* ixgbe_get_slot_info */
2119
2120 /************************************************************************
2121 * ixgbe_if_msix_intr_assign
2122 *
2123 * Setup MSI-X Interrupt resources and handlers
2124 ************************************************************************/
2125 static int
2126 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
2127 {
2128 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2129 struct ix_rx_queue *rx_que = sc->rx_queues;
2130 struct ix_tx_queue *tx_que;
2131 int error, rid, vector = 0;
2132 char buf[16];
2133
2134 	/* Admin queue is vector 0 */
2135 rid = vector + 1;
2136 for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
2137 rid = vector + 1;
2138
2139 snprintf(buf, sizeof(buf), "rxq%d", i);
2140 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2141 IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
2142
2143 if (error) {
2144 device_printf(iflib_get_dev(ctx),
2145 "Failed to allocate que int %d err: %d", i, error);
2146 sc->num_rx_queues = i + 1;
2147 goto fail;
2148 }
2149
2150 rx_que->msix = vector;
2151 }
2152 for (int i = 0; i < sc->num_tx_queues; i++) {
2153 snprintf(buf, sizeof(buf), "txq%d", i);
2154 tx_que = &sc->tx_queues[i];
2155 tx_que->msix = i % sc->num_rx_queues;
2156 iflib_softirq_alloc_generic(ctx,
2157 &sc->rx_queues[tx_que->msix].que_irq,
2158 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2159 }
2160 rid = vector + 1;
2161 error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
2162 IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq");
2163 if (error) {
2164 device_printf(iflib_get_dev(ctx),
2165 "Failed to register admin handler");
2166 return (error);
2167 }
2168
2169 sc->vector = vector;
2170
2171 return (0);
2172 fail:
2173 iflib_irq_free(ctx, &sc->irq);
2174 rx_que = sc->rx_queues;
2175 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
2176 iflib_irq_free(ctx, &rx_que->que_irq);
2177
2178 return (error);
2179 } /* ixgbe_if_msix_intr_assign */
2180
2181 static inline void
2182 ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que)
2183 {
2184 uint32_t newitr = 0;
2185 struct rx_ring *rxr = &que->rxr;
2186 /* FIXME struct tx_ring *txr = ... ->txr; */
2187
2188 /*
2189 * Do Adaptive Interrupt Moderation:
2190 * - Write out last calculated setting
2191 * - Calculate based on average size over
2192 * the last interval.
2193 */
2194 if (que->eitr_setting) {
2195 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix),
2196 que->eitr_setting);
2197 }
2198
2199 que->eitr_setting = 0;
2200 /* Idle, do nothing */
2201 if (rxr->bytes == 0) {
2202 /* FIXME && txr->bytes == 0 */
2203 return;
2204 }
2205
2206 if ((rxr->bytes) && (rxr->packets))
2207 newitr = rxr->bytes / rxr->packets;
2208 /* FIXME for transmit accounting
2209 * if ((txr->bytes) && (txr->packets))
2210 * newitr = txr->bytes/txr->packets;
2211 * if ((rxr->bytes) && (rxr->packets))
2212 * newitr = max(newitr, (rxr->bytes / rxr->packets));
2213 */
2214
2215 newitr += 24; /* account for hardware frame, crc */
2216 /* set an upper boundary */
2217 newitr = min(newitr, 3000);
2218
2219 /* Be nice to the mid range */
2220 if ((newitr > 300) && (newitr < 1200)) {
2221 newitr = (newitr / 3);
2222 } else {
2223 newitr = (newitr / 2);
2224 }
2225
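	/*
	 * 82598 needs the interval written to both halves of EITR to
	 * reset the internal counter; later MACs instead set CNT_WDIS
	 * so the write does not disturb the counter.
	 */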
2226 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
2227 newitr |= newitr << 16;
2228 } else {
2229 newitr |= IXGBE_EITR_CNT_WDIS;
2230 }
2231
2232 /* save for next interrupt */
2233 que->eitr_setting = newitr;
2234
2235 /* Reset state */
2236 /* FIXME txr->bytes = 0; */
2237 /* FIXME txr->packets = 0; */
2238 rxr->bytes = 0;
2239 rxr->packets = 0;
2240
2241 return;
2242 }
2243
2244 /*********************************************************************
2245 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2246 **********************************************************************/
2247 static int
2248 ixgbe_msix_que(void *arg)
2249 {
2250 struct ix_rx_queue *que = arg;
2251 struct ixgbe_softc *sc = que->sc;
2252 if_t ifp = iflib_get_ifp(que->sc->ctx);
2253
2254 /* Protect against spurious interrupts */
2255 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
2256 return (FILTER_HANDLED);
2257
2258 ixgbe_disable_queue(sc, que->msix);
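	/* Leave the vector masked; iflib re-enables it from the queue
	 * interrupt-enable method once the deferred work has run. */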
2259 ++que->irqs;
2260
2261 /* Check for AIM */
2262 if (sc->enable_aim) {
2263 ixgbe_perform_aim(sc, que);
2264 }
2265
2266 return (FILTER_SCHEDULE_THREAD);
2267 } /* ixgbe_msix_que */
2268
2269 /************************************************************************
2270 * ixgbe_media_status - Media Ioctl callback
2271 *
2272 * Called whenever the user queries the status of
2273 * the interface using ifconfig.
2274 ************************************************************************/
2275 static void
2276 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
2277 {
2278 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2279 struct ixgbe_hw *hw = &sc->hw;
2280 int layer;
2281
2282 INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2283
2284 ifmr->ifm_status = IFM_AVALID;
2285 ifmr->ifm_active = IFM_ETHER;
2286
2287 if (!sc->link_active)
2288 return;
2289
2290 ifmr->ifm_status |= IFM_ACTIVE;
2291 layer = sc->phy_layer;
2292
2293 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2294 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2295 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2296 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2297 switch (sc->link_speed) {
2298 case IXGBE_LINK_SPEED_10GB_FULL:
2299 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2300 break;
2301 case IXGBE_LINK_SPEED_1GB_FULL:
2302 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2303 break;
2304 case IXGBE_LINK_SPEED_100_FULL:
2305 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2306 break;
2307 case IXGBE_LINK_SPEED_10_FULL:
2308 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2309 break;
2310 }
2311 if (hw->mac.type == ixgbe_mac_X550)
2312 switch (sc->link_speed) {
2313 case IXGBE_LINK_SPEED_5GB_FULL:
2314 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2315 break;
2316 case IXGBE_LINK_SPEED_2_5GB_FULL:
2317 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2318 break;
2319 }
2320 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2321 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2322 switch (sc->link_speed) {
2323 case IXGBE_LINK_SPEED_10GB_FULL:
2324 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2325 break;
2326 }
2327 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2328 switch (sc->link_speed) {
2329 case IXGBE_LINK_SPEED_10GB_FULL:
2330 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2331 break;
2332 case IXGBE_LINK_SPEED_1GB_FULL:
2333 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2334 break;
2335 }
2336 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2337 switch (sc->link_speed) {
2338 case IXGBE_LINK_SPEED_10GB_FULL:
2339 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2340 break;
2341 case IXGBE_LINK_SPEED_1GB_FULL:
2342 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2343 break;
2344 }
2345 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2346 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2347 switch (sc->link_speed) {
2348 case IXGBE_LINK_SPEED_10GB_FULL:
2349 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2350 break;
2351 case IXGBE_LINK_SPEED_1GB_FULL:
2352 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2353 break;
2354 }
2355 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2356 switch (sc->link_speed) {
2357 case IXGBE_LINK_SPEED_10GB_FULL:
2358 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2359 break;
2360 }
2361 /*
2362 * XXX: These need to use the proper media types once
2363 * they're added.
2364 */
2365 #ifndef IFM_ETH_XTYPE
2366 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2367 switch (sc->link_speed) {
2368 case IXGBE_LINK_SPEED_10GB_FULL:
2369 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2370 break;
2371 case IXGBE_LINK_SPEED_2_5GB_FULL:
2372 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2373 break;
2374 case IXGBE_LINK_SPEED_1GB_FULL:
2375 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2376 break;
2377 }
2378 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2379 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2380 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2381 switch (sc->link_speed) {
2382 case IXGBE_LINK_SPEED_10GB_FULL:
2383 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2384 break;
2385 case IXGBE_LINK_SPEED_2_5GB_FULL:
2386 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2387 break;
2388 case IXGBE_LINK_SPEED_1GB_FULL:
2389 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2390 break;
2391 }
2392 #else
2393 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2394 switch (sc->link_speed) {
2395 case IXGBE_LINK_SPEED_10GB_FULL:
2396 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2397 break;
2398 case IXGBE_LINK_SPEED_2_5GB_FULL:
2399 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2400 break;
2401 case IXGBE_LINK_SPEED_1GB_FULL:
2402 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2403 break;
2404 }
2405 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2406 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2407 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2408 switch (sc->link_speed) {
2409 case IXGBE_LINK_SPEED_10GB_FULL:
2410 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2411 break;
2412 case IXGBE_LINK_SPEED_2_5GB_FULL:
2413 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2414 break;
2415 case IXGBE_LINK_SPEED_1GB_FULL:
2416 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2417 break;
2418 }
2419 #endif
2420
2421 /* If nothing is recognized... */
2422 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2423 ifmr->ifm_active |= IFM_UNKNOWN;
2424
2425 /* Display current flow control setting used on link */
2426 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2427 hw->fc.current_mode == ixgbe_fc_full)
2428 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2429 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2430 hw->fc.current_mode == ixgbe_fc_full)
2431 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2432 } /* ixgbe_media_status */
2433
2434 /************************************************************************
2435 * ixgbe_media_change - Media Ioctl callback
2436 *
2437 * Called when the user changes speed/duplex using
2438 * media/mediopt option with ifconfig.
2439 ************************************************************************/
2440 static int
2441 ixgbe_if_media_change(if_ctx_t ctx)
2442 {
2443 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2444 struct ifmedia *ifm = iflib_get_media(ctx);
2445 struct ixgbe_hw *hw = &sc->hw;
2446 ixgbe_link_speed speed = 0;
2447
2448 INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2449
2450 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2451 return (EINVAL);
2452
2453 if (hw->phy.media_type == ixgbe_media_type_backplane)
2454 return (EPERM);
2455
2456 /*
2457 * We don't actually need to check against the supported
2458 * media types of the adapter; ifmedia will take care of
2459 * that for us.
2460 */
2461 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2462 case IFM_AUTO:
2463 case IFM_10G_T:
2464 speed |= IXGBE_LINK_SPEED_100_FULL;
2465 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2466 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2467 break;
2468 case IFM_10G_LRM:
2469 case IFM_10G_LR:
2470 #ifndef IFM_ETH_XTYPE
2471 case IFM_10G_SR: /* KR, too */
2472 case IFM_10G_CX4: /* KX4 */
2473 #else
2474 case IFM_10G_KR:
2475 case IFM_10G_KX4:
2476 #endif
2477 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2478 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2479 break;
2480 #ifndef IFM_ETH_XTYPE
2481 case IFM_1000_CX: /* KX */
2482 #else
2483 case IFM_1000_KX:
2484 #endif
2485 case IFM_1000_LX:
2486 case IFM_1000_SX:
2487 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2488 break;
2489 case IFM_1000_T:
2490 speed |= IXGBE_LINK_SPEED_100_FULL;
2491 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2492 break;
2493 case IFM_10G_TWINAX:
2494 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2495 break;
2496 case IFM_5000_T:
2497 speed |= IXGBE_LINK_SPEED_5GB_FULL;
2498 break;
2499 case IFM_2500_T:
2500 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
2501 break;
2502 case IFM_100_TX:
2503 speed |= IXGBE_LINK_SPEED_100_FULL;
2504 break;
2505 case IFM_10_T:
2506 speed |= IXGBE_LINK_SPEED_10_FULL;
2507 break;
2508 default:
2509 goto invalid;
2510 }
2511
2512 hw->mac.autotry_restart = true;
2513 hw->mac.ops.setup_link(hw, speed, true);
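	/* Record the matching "advertise_speed" sysctl encoding:
	 * 0x1=100M, 0x2=1G, 0x4=10G, 0x8=10M, 0x10=2.5G, 0x20=5G. */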
2514 sc->advertise =
2515 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
2516 ((speed & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
2517 ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
2518 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
2519 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
2520 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);
2521
2522 return (0);
2523
2524 invalid:
2525 device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2526
2527 return (EINVAL);
2528 } /* ixgbe_if_media_change */
2529
2530 /************************************************************************
2531 * ixgbe_set_promisc
2532 ************************************************************************/
2533 static int
2534 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2535 {
2536 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2537 if_t ifp = iflib_get_ifp(ctx);
2538 u32 rctl;
2539 int mcnt = 0;
2540
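	/* Start from the current filter config with unicast promiscuous
	 * cleared; multicast promiscuous is cleared below only when the
	 * address count fits the exact-match filters. */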
2541 rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
2542 rctl &= (~IXGBE_FCTRL_UPE);
2543 if (if_getflags(ifp) & IFF_ALLMULTI)
2544 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2545 else {
2546 mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
2547 }
2548 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2549 rctl &= (~IXGBE_FCTRL_MPE);
2550 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2551
2552 if (if_getflags(ifp) & IFF_PROMISC) {
2553 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2554 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2555 } else if (if_getflags(ifp) & IFF_ALLMULTI) {
2556 rctl |= IXGBE_FCTRL_MPE;
2557 rctl &= ~IXGBE_FCTRL_UPE;
2558 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2559 }
2560 return (0);
2561 } /* ixgbe_if_promisc_set */
2562
2563 /************************************************************************
2564 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2565 ************************************************************************/
2566 static int
2567 ixgbe_msix_link(void *arg)
2568 {
2569 struct ixgbe_softc *sc = arg;
2570 struct ixgbe_hw *hw = &sc->hw;
2571 u32 eicr, eicr_mask;
2572 s32 retval;
2573
2574 ++sc->link_irq;
2575
2576 /* Pause other interrupts */
2577 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2578
2579 /* First get the cause */
2580 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2581 /* Be sure the queue bits are not cleared */
2582 eicr &= ~IXGBE_EICR_RTX_QUEUE;
2583 /* Clear interrupt with write */
2584 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2585
2586 /* Link status change */
2587 if (eicr & IXGBE_EICR_LSC) {
2588 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2589 sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
2590 }
2591
2592 if (sc->hw.mac.type != ixgbe_mac_82598EB) {
2593 if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
2594 (eicr & IXGBE_EICR_FLOW_DIR)) {
2595 /* This is probably overkill :) */
2596 if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1))
2597 return (FILTER_HANDLED);
2598 /* Disable the interrupt */
2599 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2600 sc->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2601 } else
2602 if (eicr & IXGBE_EICR_ECC) {
2603 device_printf(iflib_get_dev(sc->ctx),
2604 "Received ECC Err, initiating reset\n");
			/* assumed intent: request the double reset ("|= ~FLAG"
			 * would set every flag bit except this one) */
2605 			hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2606 ixgbe_reset_hw(hw);
2607 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2608 }
2609
2610 /* Check for over temp condition */
2611 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2612 switch (sc->hw.mac.type) {
2613 case ixgbe_mac_X550EM_a:
2614 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2615 break;
2616 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2617 IXGBE_EICR_GPI_SDP0_X550EM_a);
2618 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2619 IXGBE_EICR_GPI_SDP0_X550EM_a);
2620 retval = hw->phy.ops.check_overtemp(hw);
2621 if (retval != IXGBE_ERR_OVERTEMP)
2622 break;
2623 device_printf(iflib_get_dev(sc->ctx),
2624 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2625 device_printf(iflib_get_dev(sc->ctx),
2626 "System shutdown required!\n");
2627 break;
2628 default:
2629 if (!(eicr & IXGBE_EICR_TS))
2630 break;
2631 retval = hw->phy.ops.check_overtemp(hw);
2632 if (retval != IXGBE_ERR_OVERTEMP)
2633 break;
2634 device_printf(iflib_get_dev(sc->ctx),
2635 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2636 device_printf(iflib_get_dev(sc->ctx),
2637 "System shutdown required!\n");
2638 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2639 break;
2640 }
2641 }
2642
2643 /* Check for VF message */
2644 if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
2645 (eicr & IXGBE_EICR_MAILBOX))
2646 sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
2647 }
2648
2649 if (ixgbe_is_sfp(hw)) {
2650 /* Pluggable optics-related interrupt */
2651 if (hw->mac.type >= ixgbe_mac_X540)
2652 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2653 else
2654 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2655
2656 if (eicr & eicr_mask) {
2657 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2658 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
2659 }
2660
2661 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2662 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2663 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2664 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2665 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
2666 }
2667 }
2668
2669 /* Check for fan failure */
2670 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2671 ixgbe_check_fan_failure(sc, eicr, true);
2672 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2673 }
2674
2675 /* External PHY interrupt */
2676 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2677 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2678 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2679 sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
2680 }
2681
2682 return (sc->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2683 } /* ixgbe_msix_link */
2684
2685 /************************************************************************
2686 * ixgbe_sysctl_interrupt_rate_handler
2687 ************************************************************************/
2688 static int
2689 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2690 {
2691 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2692 int error;
2693 unsigned int reg, usec, rate;
2694
2695 if (atomic_load_acq_int(&que->sc->recovery_mode))
2696 return (EPERM);
2697
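	/*
	 * As used here, the EITR interval (bits 3:11) is in 2 usec
	 * units, so an interval of N is roughly 500000/N interrupts
	 * per second; the 4000000 constant below folds the <<3 field
	 * offset back in.
	 */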
2698 reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix));
2699 usec = ((reg & 0x0FF8) >> 3);
2700 if (usec > 0)
2701 rate = 500000 / usec;
2702 else
2703 rate = 0;
2704 error = sysctl_handle_int(oidp, &rate, 0, req);
2705 if (error || !req->newptr)
2706 return error;
2707 reg &= ~0xfff; /* default, no limitation */
2708 ixgbe_max_interrupt_rate = 0;
2709 if (rate > 0 && rate < 500000) {
2710 if (rate < 1000)
2711 rate = 1000;
2712 ixgbe_max_interrupt_rate = rate;
2713 reg |= ((4000000/rate) & 0xff8);
2714 }
2715 IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg);
2716
2717 return (0);
2718 } /* ixgbe_sysctl_interrupt_rate_handler */
2719
2720 /************************************************************************
2721 * ixgbe_add_device_sysctls
2722 ************************************************************************/
2723 static void
2724 ixgbe_add_device_sysctls(if_ctx_t ctx)
2725 {
2726 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2727 device_t dev = iflib_get_dev(ctx);
2728 struct ixgbe_hw *hw = &sc->hw;
2729 struct sysctl_oid_list *child;
2730 struct sysctl_ctx_list *ctx_list;
2731
2732 ctx_list = device_get_sysctl_ctx(dev);
2733 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2734
2735 /* Sysctls for all devices */
2736 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2737 CTLTYPE_INT | CTLFLAG_RW,
2738 sc, 0, ixgbe_sysctl_flowcntl, "I",
2739 IXGBE_SYSCTL_DESC_SET_FC);
2740
2741 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2742 CTLTYPE_INT | CTLFLAG_RW,
2743 sc, 0, ixgbe_sysctl_advertise, "I",
2744 IXGBE_SYSCTL_DESC_ADV_SPEED);
2745
2746 sc->enable_aim = ixgbe_enable_aim;
2747 SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
2748 &sc->enable_aim, 0, "Interrupt Moderation");
2749
2750 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
2751 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2752 ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions");
2753
2754 #ifdef IXGBE_DEBUG
2755 /* testing sysctls (for all devices) */
2756 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2757 CTLTYPE_INT | CTLFLAG_RW,
2758 sc, 0, ixgbe_sysctl_power_state,
2759 "I", "PCI Power State");
2760
2761 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2762 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2763 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2764 #endif
2765 /* for X550 series devices */
2766 if (hw->mac.type >= ixgbe_mac_X550)
2767 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2768 CTLTYPE_U16 | CTLFLAG_RW,
2769 sc, 0, ixgbe_sysctl_dmac,
2770 "I", "DMA Coalesce");
2771
2772 /* for WoL-capable devices */
2773 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2774 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2775 CTLTYPE_INT | CTLFLAG_RW, sc, 0,
2776 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2777
2778 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2779 CTLTYPE_U32 | CTLFLAG_RW,
2780 sc, 0, ixgbe_sysctl_wufc,
2781 "I", "Enable/Disable Wake Up Filters");
2782 }
2783
2784 /* for X552/X557-AT devices */
2785 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2786 struct sysctl_oid *phy_node;
2787 struct sysctl_oid_list *phy_list;
2788
2789 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2790 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls");
2791 phy_list = SYSCTL_CHILDREN(phy_node);
2792
2793 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2794 CTLTYPE_U16 | CTLFLAG_RD,
2795 sc, 0, ixgbe_sysctl_phy_temp,
2796 "I", "Current External PHY Temperature (Celsius)");
2797
2798 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2799 "overtemp_occurred",
2800 CTLTYPE_U16 | CTLFLAG_RD, sc, 0,
2801 ixgbe_sysctl_phy_overtemp_occurred, "I",
2802 "External PHY High Temperature Event Occurred");
2803 }
2804
2805 if (sc->feat_cap & IXGBE_FEATURE_EEE) {
2806 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2807 CTLTYPE_INT | CTLFLAG_RW, sc, 0,
2808 ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2809 }
2810 } /* ixgbe_add_device_sysctls */
2811
2812 /************************************************************************
2813 * ixgbe_allocate_pci_resources
2814 ************************************************************************/
2815 static int
2816 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2817 {
2818 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2819 device_t dev = iflib_get_dev(ctx);
2820 int rid;
2821
2822 rid = PCIR_BAR(0);
2823 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2824 RF_ACTIVE);
2825
2826 if (!(sc->pci_mem)) {
2827 device_printf(dev, "Unable to allocate bus resource: memory\n");
2828 return (ENXIO);
2829 }
2830
2831 /* Save bus_space values for READ/WRITE_REG macros */
2832 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
2833 sc->osdep.mem_bus_space_handle =
2834 rman_get_bushandle(sc->pci_mem);
2835 /* Set hw values for shared code */
2836 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
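	/* hw_addr holds a pointer to the bus handle itself; register
	 * access goes through the bus_space tag/handle saved above,
	 * not through a direct pointer dereference. */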
2837
2838 return (0);
2839 } /* ixgbe_allocate_pci_resources */
2840
2841 /************************************************************************
2842 * ixgbe_detach - Device removal routine
2843 *
2844 * Called when the driver is being removed.
2845 * Stops the adapter and deallocates all the resources
2846 * that were allocated for driver operation.
2847 *
2848 * return 0 on success, positive on failure
2849 ************************************************************************/
2850 static int
2851 ixgbe_if_detach(if_ctx_t ctx)
2852 {
2853 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2854 device_t dev = iflib_get_dev(ctx);
2855 u32 ctrl_ext;
2856
2857 INIT_DEBUGOUT("ixgbe_detach: begin");
2858
2859 if (ixgbe_pci_iov_detach(dev) != 0) {
2860 device_printf(dev, "SR-IOV in use; detach first.\n");
2861 return (EBUSY);
2862 }
2863
2864 ixgbe_setup_low_power_mode(ctx);
2865
2866 /* let hardware know driver is unloading */
2867 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
2868 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2869 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
2870
2871 callout_drain(&sc->fw_mode_timer);
2872
2873 ixgbe_free_pci_resources(ctx);
2874 free(sc->mta, M_IXGBE);
2875
2876 return (0);
2877 } /* ixgbe_if_detach */
2878
2879 /************************************************************************
2880 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2881 *
2882 * Prepare the adapter/port for LPLU and/or WoL
2883 ************************************************************************/
2884 static int
2885 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2886 {
2887 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2888 struct ixgbe_hw *hw = &sc->hw;
2889 device_t dev = iflib_get_dev(ctx);
2890 s32 error = 0;
2891
2892 if (!hw->wol_enabled)
2893 ixgbe_set_phy_power(hw, false);
2894
2895 /* Limit power management flow to X550EM baseT */
2896 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2897 hw->phy.ops.enter_lplu) {
2898 /* Turn off support for APM wakeup. (Using ACPI instead) */
2899 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
2900 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
2901
2902 /*
2903 * Clear Wake Up Status register to prevent any previous wakeup
2904 * events from waking us up immediately after we suspend.
2905 */
2906 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2907
2908 /*
2909 * Program the Wakeup Filter Control register with user filter
2910 * settings
2911 */
2912 IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);
2913
2914 /* Enable wakeups and power management in Wakeup Control */
2915 IXGBE_WRITE_REG(hw, IXGBE_WUC,
2916 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2917
2918 /* X550EM baseT adapters need a special LPLU flow */
2919 hw->phy.reset_disable = true;
2920 ixgbe_if_stop(ctx);
2921 error = hw->phy.ops.enter_lplu(hw);
2922 if (error)
2923 device_printf(dev, "Error entering LPLU: %d\n", error);
2924 hw->phy.reset_disable = false;
2925 } else {
2926 /* Just stop for other adapters */
2927 ixgbe_if_stop(ctx);
2928 }
2929
2930 return error;
2931 } /* ixgbe_setup_low_power_mode */
2932
2933 /************************************************************************
2934 * ixgbe_shutdown - Shutdown entry point
2935 ************************************************************************/
2936 static int
2937 ixgbe_if_shutdown(if_ctx_t ctx)
2938 {
2939 int error = 0;
2940
2941 INIT_DEBUGOUT("ixgbe_shutdown: begin");
2942
2943 error = ixgbe_setup_low_power_mode(ctx);
2944
2945 return (error);
2946 } /* ixgbe_if_shutdown */
2947
2948 /************************************************************************
2949 * ixgbe_suspend
2950 *
2951 * From D0 to D3
2952 ************************************************************************/
2953 static int
2954 ixgbe_if_suspend(if_ctx_t ctx)
2955 {
2956 int error = 0;
2957
2958 INIT_DEBUGOUT("ixgbe_suspend: begin");
2959
2960 error = ixgbe_setup_low_power_mode(ctx);
2961
2962 return (error);
2963 } /* ixgbe_if_suspend */
2964
2965 /************************************************************************
2966 * ixgbe_resume
2967 *
2968 * From D3 to D0
2969 ************************************************************************/
2970 static int
2971 ixgbe_if_resume(if_ctx_t ctx)
2972 {
2973 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2974 device_t dev = iflib_get_dev(ctx);
2975 if_t ifp = iflib_get_ifp(ctx);
2976 struct ixgbe_hw *hw = &sc->hw;
2977 u32 wus;
2978
2979 INIT_DEBUGOUT("ixgbe_resume: begin");
2980
2981 /* Read & clear WUS register */
2982 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2983 if (wus)
2984 device_printf(dev, "Woken up by (WUS): %#010x\n",
2985 		    wus);
2986 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2987 /* And clear WUFC until next low-power transition */
2988 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2989
2990 /*
2991 * Required after D3->D0 transition;
2992 * will re-advertise all previous advertised speeds
2993 */
2994 if (if_getflags(ifp) & IFF_UP)
2995 ixgbe_if_init(ctx);
2996
2997 return (0);
2998 } /* ixgbe_if_resume */
2999
3000 /************************************************************************
3001 * ixgbe_if_mtu_set - Ioctl mtu entry point
3002 *
3003 * Return 0 on success, EINVAL on failure
3004 ************************************************************************/
3005 static int
3006 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
3007 {
3008 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3009 int error = 0;
3010
3011 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
3012
3013 if (mtu > IXGBE_MAX_MTU) {
3014 error = EINVAL;
3015 } else {
3016 sc->max_frame_size = mtu + IXGBE_MTU_HDR;
3017 }
3018
3019 return error;
3020 } /* ixgbe_if_mtu_set */
3021
3022 /************************************************************************
3023 * ixgbe_if_crcstrip_set
3024 ************************************************************************/
3025 static void
3026 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
3027 {
3028 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3029 struct ixgbe_hw *hw = &sc->hw;
3030 /* crc stripping is set in two places:
3031 * IXGBE_HLREG0 (modified on init_locked and hw reset)
3032 * IXGBE_RDRXCTL (set by the original driver in
3033 * ixgbe_setup_hw_rsc() called in init_locked.
3034 * We disable the setting when netmap is compiled in).
3035 * We update the values here, but also in ixgbe.c because
3036 * init_locked sometimes is called outside our control.
3037 */
3038 uint32_t hl, rxc;
3039
3040 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3041 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3042 #ifdef NETMAP
3043 if (netmap_verbose)
3044 D("%s read HLREG 0x%x rxc 0x%x",
3045 onoff ? "enter" : "exit", hl, rxc);
3046 #endif
3047 /* hw requirements ... */
3048 rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3049 rxc |= IXGBE_RDRXCTL_RSCACKC;
3050 if (onoff && !crcstrip) {
3051 /* keep the crc. Fast rx */
3052 hl &= ~IXGBE_HLREG0_RXCRCSTRP;
3053 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
3054 } else {
3055 /* reset default mode */
3056 hl |= IXGBE_HLREG0_RXCRCSTRP;
3057 rxc |= IXGBE_RDRXCTL_CRCSTRIP;
3058 }
3059 #ifdef NETMAP
3060 if (netmap_verbose)
3061 D("%s write HLREG 0x%x rxc 0x%x",
3062 onoff ? "enter" : "exit", hl, rxc);
3063 #endif
3064 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
3065 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
3066 } /* ixgbe_if_crcstrip_set */
3067
3068 /*********************************************************************
3069 * ixgbe_if_init - Init entry point
3070 *
3071 * Used in two ways: It is used by the stack as an init
3072 * entry point in network interface structure. It is also
3073 * used by the driver as a hw/sw initialization routine to
3074 * get to a consistent state.
3075 *
3076 * Return 0 on success, positive on failure
3077 **********************************************************************/
3078 void
3079 ixgbe_if_init(if_ctx_t ctx)
3080 {
3081 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3082 if_t ifp = iflib_get_ifp(ctx);
3083 device_t dev = iflib_get_dev(ctx);
3084 struct ixgbe_hw *hw = &sc->hw;
3085 struct ix_rx_queue *rx_que;
3086 struct ix_tx_queue *tx_que;
3087 u32 txdctl, mhadd;
3088 u32 rxdctl, rxctrl;
3089 u32 ctrl_ext;
3090
3091 int i, j, err;
3092
3093 INIT_DEBUGOUT("ixgbe_if_init: begin");
3094
3095 /* Queue indices may change with IOV mode */
3096 ixgbe_align_all_queue_indices(sc);
3097
3098 /* reprogram the RAR[0] in case user changed it. */
3099 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV);
3100
3101 /* Get the latest mac address, User can use a LAA */
3102 bcopy(if_getlladdr(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3103 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1);
3104 hw->addr_ctrl.rar_used_count = 1;
3105
3106 ixgbe_init_hw(hw);
3107
3108 ixgbe_initialize_iov(sc);
3109
3110 ixgbe_initialize_transmit_units(ctx);
3111
3112 /* Setup Multicast table */
3113 ixgbe_if_multi_set(ctx);
3114
3115 /* Determine the correct mbuf pool, based on frame size */
3116 sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
3117
3118 /* Configure RX settings */
3119 ixgbe_initialize_receive_units(ctx);
3120
3121 /*
3122 * Initialize variable holding task enqueue requests
3123 * from MSI-X interrupts
3124 */
3125 sc->task_requests = 0;
3126
3127 /* Enable SDP & MSI-X interrupts based on adapter */
3128 ixgbe_config_gpie(sc);
3129
3130 /* Set MTU size */
3131 if (if_getmtu(ifp) > ETHERMTU) {
3132 /* aka IXGBE_MAXFRS on 82599 and newer */
3133 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3134 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3135 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
3136 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3137 }
3138
3139 /* Now enable all the queues */
3140 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) {
3141 struct tx_ring *txr = &tx_que->txr;
3142
3143 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
3144 txdctl |= IXGBE_TXDCTL_ENABLE;
3145 /* Set WTHRESH to 8, burst writeback */
3146 txdctl |= (8 << 16);
3147 /*
3148 * When the internal queue falls below PTHRESH (32),
3149 * start prefetching as long as there are at least
3150 * HTHRESH (1) buffers ready. The values are taken
3151 * from the Intel linux driver 3.8.21.
3152 * Prefetching enables tx line rate even with 1 queue.
3153 */
3154 txdctl |= (32 << 0) | (1 << 8);
3155 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
3156 }
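
	/*
	 * Worked example (illustrative; field layout per the datasheet
	 * convention assumed here: PTHRESH bits 6:0, HTHRESH bits 14:8,
	 * WTHRESH bits 22:16): the three ORs above contribute
	 * (8 << 16) | (1 << 8) | 32 = 0x00080120 on top of
	 * IXGBE_TXDCTL_ENABLE.
	 */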

	for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
	    i++, rx_que++) {
		struct rx_ring *rxr = &rx_que->rxr;

		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Set up MSI/MSI-X routing */
	if (ixgbe_enable_msix) {
		ixgbe_configure_ivars(sc);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else { /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(sc, 0, 0, 0);
		ixgbe_set_ivar(sc, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(sc);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, true);

	/* Config/Enable Link */
	ixgbe_config_link(ctx);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(sc);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(ctx);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(sc);

	/* And now turn on interrupts */
	ixgbe_if_enable_intr(ctx);

	/* Enable the use of the MBX by the VF's */
	if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}
} /* ixgbe_if_init */

/************************************************************************
 * ixgbe_set_ivar
 *
 * Setup the correct IVAR register for a particular MSI-X interrupt
 * (yes this is all very magic and confusing :)
 *  - entry is the register array entry
 *  - vector is the MSI-X vector for this queue
 *  - type is RX/TX/MISC
 ************************************************************************/
static void
ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (type == -1)
			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
		else
			entry += (type * 64);
		index = (entry >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (entry & 0x3)));
		ivar |= (vector << (8 * (entry & 0x3)));
		IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (type == -1) { /* MISC IVAR */
			index = (entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else { /* RX/TX IVARS */
			index = (16 * (entry & 1)) + (8 * type);
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
		}
		/* FALLTHROUGH */
	default:
		break;
	}
} /* ixgbe_set_ivar */
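
/*
 * Worked example (illustrative): on an 82599-class MAC, mapping RX
 * queue entry 5 (type 0) to MSI-X vector 3 yields
 * index = (16 * (5 & 1)) + (8 * 0) = 16, so bits 23:16 of IVAR(2)
 * (entry >> 1 == 2) receive 0x83, i.e. vector 3 | IXGBE_IVAR_ALLOC_VAL.
 */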

/************************************************************************
 * ixgbe_configure_ivars
 ************************************************************************/
static void
ixgbe_configure_ivars(struct ixgbe_softc *sc)
{
	struct ix_rx_queue *rx_que = sc->rx_queues;
	struct ix_tx_queue *tx_que = sc->tx_queues;
	u32 newitr;

	if (ixgbe_max_interrupt_rate > 0)
		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
	else {
		/*
		 * Disable DMA coalescing if interrupt moderation is
		 * disabled.
		 */
		sc->dmac = 0;
		newitr = 0;
	}

	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
		struct rx_ring *rxr = &rx_que->rxr;

		/* First the RX queue entry */
		ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0);

		/* Set an Initial EITR value */
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr);
	}
	for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		/* ... and the TX */
		ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1);
	}
	/* For the Link interrupt */
	ixgbe_set_ivar(sc, 1, sc->vector, -1);
} /* ixgbe_configure_ivars */
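
/*
 * Worked example (illustrative; assumes the usual EITR layout where
 * bits 11:3 hold the interval in 2 usec units): a cap of 31250
 * interrupts/s gives newitr = 4000000 / 31250 = 0x80, which the
 * 0x0FF8 mask leaves intact; as a register value that encodes an
 * interval of 16 * 2 usec = 32 usec, i.e. ~31250 interrupts/s.
 */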

/************************************************************************
 * ixgbe_config_gpie
 ************************************************************************/
static void
ixgbe_config_gpie(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 gpie;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	if (sc->intr_type == IFLIB_INTR_MSIX) {
		/* Enable Enhanced MSI-X mode */
		gpie |= IXGBE_GPIE_MSIX_MODE
		    | IXGBE_GPIE_EIAME
		    | IXGBE_GPIE_PBA_SUPPORT
		    | IXGBE_GPIE_OCD;
	}

	/* Fan Failure Interrupt */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
		gpie |= IXGBE_SDP1_GPIEN;

	/* Thermal Sensor Interrupt */
	if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
		gpie |= IXGBE_SDP0_GPIEN_X540;

	/* Link detection */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		gpie |= IXGBE_SDP0_GPIEN_X540;
		break;
	default:
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
} /* ixgbe_config_gpie */

/************************************************************************
 * ixgbe_config_delay_values
 *
 * Requires sc->max_frame_size to be set.
 ************************************************************************/
static void
ixgbe_config_delay_values(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 rxpb, frame, size, tmp;

	frame = sc->max_frame_size;

	/* Calculate High Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		tmp = IXGBE_DV_X540(frame, frame);
		break;
	default:
		tmp = IXGBE_DV(frame, frame);
		break;
	}
	size = IXGBE_BT2KB(tmp);
	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
	hw->fc.high_water[0] = rxpb - size;

	/* Now calculate Low Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		tmp = IXGBE_LOW_DV_X540(frame);
		break;
	default:
		tmp = IXGBE_LOW_DV(frame);
		break;
	}
	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.send_xon = true;
} /* ixgbe_config_delay_values */
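
/*
 * Note (added for clarity): IXGBE_DV()/IXGBE_LOW_DV() evaluate the
 * datasheet flow-control delay equations in bit times and
 * IXGBE_BT2KB() converts the result to kilobytes. The XOFF high-water
 * mark is thus the RX packet buffer size minus the worst-case data
 * still in flight after a PAUSE frame is sent, and the low-water mark
 * is the level at which an XON resumes traffic.
 */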

/************************************************************************
 * ixgbe_if_multi_set - Multicast Update
 *
 * Called whenever multicast address list is updated.
 ************************************************************************/
static u_int
ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
{
	struct ixgbe_softc *sc = arg;
	struct ixgbe_mc_addr *mta = sc->mta;

	if (idx == MAX_NUM_MULTICAST_ADDRESSES)
		return (0);
	bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	mta[idx].vmdq = sc->pool;

	return (1);
} /* ixgbe_mc_filter_apply */

static void
ixgbe_if_multi_set(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_mc_addr *mta;
	if_t ifp = iflib_get_ifp(ctx);
	u8 *update_ptr;
	u32 fctrl;
	u_int mcnt;

	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");

	mta = sc->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	mcnt = if_foreach_llmaddr(ifp, ixgbe_mc_filter_apply, sc);

	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, true);
	}

	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);

	if (if_getflags(ifp) & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
	    if_getflags(ifp) & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
} /* ixgbe_if_multi_set */

/************************************************************************
 * ixgbe_mc_array_itr
 *
 * An iterator function needed by the multicast shared code.
 * It feeds the shared code routine the addresses in the
 * array of ixgbe_if_multi_set() one by one.
 ************************************************************************/
static u8 *
ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	struct ixgbe_mc_addr *mta;

	mta = (struct ixgbe_mc_addr *)*update_ptr;
	*vmdq = mta->vmdq;

	*update_ptr = (u8 *)(mta + 1);

	return (mta->addr);
} /* ixgbe_mc_array_itr */
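
/*
 * Note (added for clarity): the shared code only sees a u8 cursor, so
 * the cast-and-advance above ("mta + 1") steps the cursor one
 * struct ixgbe_mc_addr at a time while handing back the embedded MAC
 * address and its VMDq pool index.
 */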

/************************************************************************
 * ixgbe_if_timer - Timer routine
 *
 * Checks for link status, updates statistics,
 * and runs the watchdog check.
 ************************************************************************/
static void
ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	if (qid != 0)
		return;

	/* Check for pluggable optics */
	if (sc->sfp_probe)
		if (!ixgbe_sfp_probe(ctx))
			return; /* Nothing to do */

	ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);

	/* Fire off the adminq task */
	iflib_admin_intr_deferred(ctx);
} /* ixgbe_if_timer */

/************************************************************************
 * ixgbe_fw_mode_timer - FW mode timer routine
 ************************************************************************/
static void
ixgbe_fw_mode_timer(void *arg)
{
	struct ixgbe_softc *sc = arg;
	struct ixgbe_hw *hw = &sc->hw;

	if (ixgbe_fw_recovery_mode(hw)) {
		if (atomic_cmpset_acq_int(&sc->recovery_mode, 0, 1)) {
			/* Firmware error detected, entering recovery mode */
			device_printf(sc->dev,
			    "Firmware recovery mode detected. Limiting"
			    " functionality. Refer to the Intel(R) Ethernet"
			    " Adapters and Devices User Guide for details on"
			    " firmware recovery mode.\n");

			if (hw->adapter_stopped == FALSE)
				ixgbe_if_stop(sc->ctx);
		}
	} else
		atomic_cmpset_acq_int(&sc->recovery_mode, 1, 0);

	callout_reset(&sc->fw_mode_timer, hz,
	    ixgbe_fw_mode_timer, sc);
} /* ixgbe_fw_mode_timer */

/************************************************************************
 * ixgbe_sfp_probe
 *
 * Determine if a port had optics inserted.
 ************************************************************************/
static bool
ixgbe_sfp_probe(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	bool result = false;

	if ((hw->phy.type == ixgbe_phy_nl) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret)
			goto out;
		ret = hw->phy.ops.reset(hw);
		sc->sfp_probe = false;
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module detected!\n");
			device_printf(dev,
			    "Reload driver with supported module.\n");
			goto out;
		} else
			device_printf(dev, "SFP+ module detected!\n");
		/* We now have supported optics */
		result = true;
	}
out:
	return (result);
} /* ixgbe_sfp_probe */

/************************************************************************
 * ixgbe_handle_mod - Tasklet for SFP module interrupts
 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	u32 err, cage_full = 0;

	if (sc->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			goto handle_mod_out;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		goto handle_mod_out;
	}

	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		goto handle_mod_out;
	}
	sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
	return;

handle_mod_out:
	sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
} /* ixgbe_handle_mod */

/************************************************************************
 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32 autoneg;
	bool negotiate;

	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, true);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(sc->media);
	ixgbe_add_media_types(sc->ctx);
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */

/************************************************************************
 * ixgbe_handle_phy - Tasklet for external PHY interrupts
 ************************************************************************/
static void
ixgbe_handle_phy(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int error;

	error = hw->phy.ops.handle_lasi(hw);
	if (error == IXGBE_ERR_OVERTEMP)
		device_printf(sc->dev,
		    "CRITICAL: EXTERNAL PHY OVER TEMP!!"
		    " PHY will downshift to lower power state!\n");
	else if (error)
		device_printf(sc->dev,
		    "Error handling LASI interrupt: %d\n", error);
} /* ixgbe_handle_phy */

/************************************************************************
 * ixgbe_if_stop - Stop the hardware
 *
 * Disables all traffic on the adapter by issuing a
 * global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixgbe_if_stop(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;

	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");

	ixgbe_reset_hw(hw);
	hw->adapter_stopped = false;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	sc->link_up = false;
	ixgbe_if_update_admin_status(ctx);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
} /* ixgbe_if_stop */

/************************************************************************
 * ixgbe_if_update_admin_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixgbe_if_update_admin_status(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);

	if (sc->link_up) {
		if (sc->link_active == false) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s\n",
				    ((sc->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			sc->link_active = true;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&sc->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(sc);
			iflib_link_state_change(ctx, LINK_STATE_UP,
			    ixgbe_link_speed_to_baudrate(sc->link_speed));

			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(sc);
		}
	} else { /* Link down */
		if (sc->link_active == true) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
			sc->link_active = false;
			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(sc);
		}
	}

	/* Handle task requests from msix_link() */
	if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
		ixgbe_handle_mod(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
		ixgbe_handle_msf(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
		ixgbe_handle_mbx(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
		ixgbe_reinit_fdir(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
		ixgbe_handle_phy(ctx);
	sc->task_requests = 0;

	ixgbe_update_stats_counters(sc);
} /* ixgbe_if_update_admin_status */

/************************************************************************
 * ixgbe_config_dmac - Configure DMA Coalescing
 ************************************************************************/
static void
ixgbe_config_dmac(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;

	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
		return;

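	/*
	 * Note (added for clarity): the '^' tests below are simple
	 * "differs from" checks; the hardware is reprogrammed only when
	 * the watchdog timer or link speed has actually changed.
	 */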
	if (dcfg->watchdog_timer ^ sc->dmac ||
	    dcfg->link_speed ^ sc->link_speed) {
		dcfg->watchdog_timer = sc->dmac;
		dcfg->fcoe_en = false;
		dcfg->link_speed = sc->link_speed;
		dcfg->num_tcs = 1;

		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
		    dcfg->watchdog_timer, dcfg->link_speed);

		hw->mac.ops.dmac_config(hw);
	}
} /* ixgbe_config_dmac */

/************************************************************************
 * ixgbe_if_enable_intr
 ************************************************************************/
void
ixgbe_if_enable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct ix_rx_queue *que = sc->rx_queues;
	u32 mask, fwsm;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	switch (sc->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (sc->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (sc->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (sc->intr_type == IFLIB_INTR_MSIX) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < sc->num_rx_queues; i++, que++)
		ixgbe_enable_queue(sc, que->msix);

	IXGBE_WRITE_FLUSH(hw);
} /* ixgbe_if_enable_intr */

/************************************************************************
 * ixgbe_if_disable_intr
 ************************************************************************/
static void
ixgbe_if_disable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	if (sc->intr_type == IFLIB_INTR_MSIX)
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(&sc->hw);
} /* ixgbe_if_disable_intr */

/************************************************************************
 * ixgbe_link_intr_enable
 ************************************************************************/
static void
ixgbe_link_intr_enable(if_ctx_t ctx)
{
	struct ixgbe_hw *hw =
	    &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
} /* ixgbe_link_intr_enable */

/************************************************************************
 * ixgbe_if_rx_queue_intr_enable
 ************************************************************************/
static int
ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = &sc->rx_queues[rxqid];

	ixgbe_enable_queue(sc, que->msix);

	return (0);
} /* ixgbe_if_rx_queue_intr_enable */

/************************************************************************
 * ixgbe_enable_queue
 ************************************************************************/
static void
ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u64 queue = 1ULL << vector;
	u32 mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
} /* ixgbe_enable_queue */
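
/*
 * Worked example (illustrative): for MSI-X vector 35, queue =
 * 1ULL << 35, so the low 32-bit mask is zero and only EIMS_EX(1) is
 * written, with bit 35 - 32 = 3 set. Splitting the u64 this way is
 * what lets 82599-class MACs enable more than 32 vectors.
 */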

/************************************************************************
 * ixgbe_disable_queue
 ************************************************************************/
static void
ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u64 queue = 1ULL << vector;
	u32 mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
} /* ixgbe_disable_queue */

/************************************************************************
 * ixgbe_intr - Legacy Interrupt Service Routine
 ************************************************************************/
int
ixgbe_intr(void *arg)
{
	struct ixgbe_softc *sc = arg;
	struct ix_rx_queue *que = sc->rx_queues;
	struct ixgbe_hw *hw = &sc->hw;
	if_ctx_t ctx = sc->ctx;
	u32 eicr, eicr_mask;

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	if (eicr == 0) {
		ixgbe_if_enable_intr(ctx);
		return (FILTER_HANDLED);
	}

	/* Check for fan failure */
	if ((sc->feat_en & IXGBE_FEATURE_FAN_FAIL) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(sc->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS,
		    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		iflib_admin_intr_deferred(ctx);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
		}

		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		sc->task_requests |= IXGBE_REQUEST_TASK_PHY;

	return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_intr */

/************************************************************************
 * ixgbe_free_pci_resources
 ************************************************************************/
static void
ixgbe_free_pci_resources(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = sc->rx_queues;
	device_t dev = iflib_get_dev(ctx);

	/* Release all MSI-X queue resources */
	if (sc->intr_type == IFLIB_INTR_MSIX)
		iflib_irq_free(ctx, &sc->irq);

	if (que != NULL) {
		for (int i = 0; i < sc->num_rx_queues; i++, que++) {
			iflib_irq_free(ctx, &que->que_irq);
		}
	}

	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->pci_mem), sc->pci_mem);
} /* ixgbe_free_pci_resources */

/************************************************************************
 * ixgbe_sysctl_flowcntl
 *
 * SYSCTL wrapper around setting Flow Control
 ************************************************************************/
static int
ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc;
	int error, fc;

	sc = (struct ixgbe_softc *)arg1;
	fc = sc->hw.fc.current_mode;

	error = sysctl_handle_int(oidp, &fc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Don't bother if it's not changed */
	if (fc == sc->hw.fc.current_mode)
		return (0);

	return ixgbe_set_flowcntl(sc, fc);
} /* ixgbe_sysctl_flowcntl */

/************************************************************************
 * ixgbe_set_flowcntl - Set flow control
 *
 * Flow control values:
 *   0 - off
 *   1 - rx pause
 *   2 - tx pause
 *   3 - full
 ************************************************************************/
static int
ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
{
	switch (fc) {
	case ixgbe_fc_rx_pause:
	case ixgbe_fc_tx_pause:
	case ixgbe_fc_full:
		sc->hw.fc.requested_mode = fc;
		if (sc->num_rx_queues > 1)
			ixgbe_disable_rx_drop(sc);
		break;
	case ixgbe_fc_none:
		sc->hw.fc.requested_mode = ixgbe_fc_none;
		if (sc->num_rx_queues > 1)
			ixgbe_enable_rx_drop(sc);
		break;
	default:
		return (EINVAL);
	}

	/* Don't autoneg if forcing a value */
	sc->hw.fc.disable_fc_autoneg = true;
	ixgbe_fc_enable(&sc->hw);

	return (0);
} /* ixgbe_set_flowcntl */
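
/*
 * Usage sketch (illustrative; assumes the handler is attached as
 * dev.ix.<unit>.fc):
 *
 *   # sysctl dev.ix.0.fc=3	# request full RX/TX pause
 *   # sysctl dev.ix.0.fc=0	# flow control off; RX drop re-enabled
 */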

/************************************************************************
 * ixgbe_enable_rx_drop
 *
 * Enable the hardware to drop packets when the buffer is
 * full. This is useful with multiqueue, so that no single
 * queue being full stalls the entire RX engine. We only
 * enable this when Multiqueue is enabled AND Flow Control
 * is disabled.
 ************************************************************************/
static void
ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct rx_ring *rxr;
	u32 srrctl;

	for (int i = 0; i < sc->num_rx_queues; i++) {
		rxr = &sc->rx_queues[i].rxr;
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl |= IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* enable drop for each vf */
	for (int i = 0; i < sc->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
		    IXGBE_QDE_ENABLE));
	}
} /* ixgbe_enable_rx_drop */

/************************************************************************
 * ixgbe_disable_rx_drop
 ************************************************************************/
static void
ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct rx_ring *rxr;
	u32 srrctl;

	for (int i = 0; i < sc->num_rx_queues; i++) {
		rxr = &sc->rx_queues[i].rxr;
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* disable drop for each vf */
	for (int i = 0; i < sc->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
	}
} /* ixgbe_disable_rx_drop */

/************************************************************************
 * ixgbe_sysctl_advertise
 *
 * SYSCTL wrapper around setting advertised speed
 ************************************************************************/
static int
ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc;
	int error, advertise;

	sc = (struct ixgbe_softc *)arg1;
	if (atomic_load_acq_int(&sc->recovery_mode))
		return (EPERM);

	advertise = sc->advertise;

	error = sysctl_handle_int(oidp, &advertise, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixgbe_set_advertise(sc, advertise);
} /* ixgbe_sysctl_advertise */

/************************************************************************
 * ixgbe_set_advertise - Control advertised link speed
 *
 * Flags:
 *   0x1  - advertise 100 Mb
 *   0x2  - advertise 1G
 *   0x4  - advertise 10G
 *   0x8  - advertise 10 Mb (yes, Mb)
 *   0x10 - advertise 2.5G (disabled by default)
 *   0x20 - advertise 5G (disabled by default)
 ************************************************************************/
static int
ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
{
	device_t dev = iflib_get_dev(sc->ctx);
	struct ixgbe_hw *hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	s32 err = IXGBE_NOT_IMPLEMENTED;
	bool negotiate = false;

	/* Checks to validate new value */
	if (sc->advertise == advertise) /* no change */
		return (0);

	hw = &sc->hw;

	/* No speed changes for backplane media */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (ENODEV);

	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber))) {
		device_printf(dev,
		    "Advertised speed can only be set on copper or"
		    " multispeed fiber media types.\n");
		return (EINVAL);
	}

	if (advertise < 0x1 || advertise > 0x3F) {
		device_printf(dev,
		    "Invalid advertised speed; valid modes are 0x1 through"
		    " 0x3F\n");
		return (EINVAL);
	}

	if (hw->mac.ops.get_link_capabilities) {
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(dev,
			    "Unable to determine supported advertise"
			    " speeds\n");
			return (ENODEV);
		}
	}

	/* Set new value and report new advertised mode */
	if (advertise & 0x1) {
		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
			device_printf(dev,
			    "Interface does not support 100Mb advertised"
			    " speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_100_FULL;
	}
	if (advertise & 0x2) {
		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
			device_printf(dev,
			    "Interface does not support 1Gb advertised"
			    " speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
	}
	if (advertise & 0x4) {
		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
			device_printf(dev,
			    "Interface does not support 10Gb advertised"
			    " speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
	}
	if (advertise & 0x8) {
		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
			device_printf(dev,
			    "Interface does not support 10Mb advertised"
			    " speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_10_FULL;
	}
	if (advertise & 0x10) {
		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
			device_printf(dev,
			    "Interface does not support 2.5G advertised"
			    " speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
	}
	if (advertise & 0x20) {
		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
			device_printf(dev,
			    "Interface does not support 5G advertised"
			    " speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
	}

	hw->mac.autotry_restart = true;
	hw->mac.ops.setup_link(hw, speed, true);
	sc->advertise = advertise;

	return (0);
} /* ixgbe_set_advertise */
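
/*
 * Worked example (illustrative): advertise = 0x6 requests 1G | 10G
 * (0x2 | 0x4); on a copper port whose link_caps include both, speed
 * ends up as IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL
 * before setup_link() restarts negotiation.
 */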

/************************************************************************
 * ixgbe_get_default_advertise - Get default advertised speed settings
 *
 * Formatted for sysctl usage.
 * Flags:
 *   0x1  - advertise 100 Mb
 *   0x2  - advertise 1G
 *   0x4  - advertise 10G
 *   0x8  - advertise 10 Mb (yes, Mb)
 *   0x10 - advertise 2.5G (disabled by default)
 *   0x20 - advertise 5G (disabled by default)
 ************************************************************************/
static int
ixgbe_get_default_advertise(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	int speed;
	ixgbe_link_speed link_caps = 0;
	s32 err;
	bool negotiate = false;

	/*
	 * Advertised speed means nothing unless it's copper or
	 * multi-speed fiber
	 */
	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
	    !(hw->phy.multispeed_fiber))
		return (0);

	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
	if (err != IXGBE_SUCCESS)
		return (0);

	if (hw->mac.type == ixgbe_mac_X550) {
		/*
		 * 2.5G and 5G autonegotiation speeds on X550
		 * are disabled by default due to reported
		 * interoperability issues with some switches.
		 */
		link_caps &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
		    IXGBE_LINK_SPEED_5GB_FULL);
	}

	speed =
	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x4  : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x2  : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x1  : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x8  : 0);

	return speed;
} /* ixgbe_get_default_advertise */

/************************************************************************
 * ixgbe_sysctl_dmac - Manage DMA Coalescing
 *
 * Control values:
 *   0/1 - off / on (use default value of 1000)
 *
 * Legal timer values are:
 *   50, 100, 250, 500, 1000, 2000, 5000, 10000
 *
 * Turning off interrupt moderation will also turn this off.
 ************************************************************************/
static int
ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	if_t ifp = iflib_get_ifp(sc->ctx);
	int error;
	u16 newval;

	newval = sc->dmac;
	error = sysctl_handle_16(oidp, &newval, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	switch (newval) {
	case 0:
		/* Disabled */
		sc->dmac = 0;
		break;
	case 1:
		/* Enable and use default */
		sc->dmac = 1000;
		break;
	case 50:
	case 100:
	case 250:
	case 500:
	case 1000:
	case 2000:
	case 5000:
	case 10000:
		/* Legal values - allow */
		sc->dmac = newval;
		break;
	default:
		/* Do nothing, illegal value */
		return (EINVAL);
	}

	/* Re-initialize hardware if it's already running */
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		if_init(ifp, ifp);

	return (0);
} /* ixgbe_sysctl_dmac */
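
/*
 * Usage sketch (illustrative; assumes the handler is attached as
 * dev.ix.<unit>.dmac, with the legal values above serving as the DMA
 * coalescing watchdog timer period):
 *
 *   # sysctl dev.ix.0.dmac=1000	# enable with the default timer
 *   # sysctl dev.ix.0.dmac=0	# disable again
 */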

#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_power_state
 *
 * Sysctl to test power states
 * Values:
 *   0      - set device to D0
 *   3      - set device to D3
 *   (none) - get current device power state
 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	device_t dev = sc->dev;
	int curr_ps, new_ps, error = 0;

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_handle_int(oidp, &new_ps, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
} /* ixgbe_sysctl_power_state */
#endif

/************************************************************************
 * ixgbe_sysctl_wol_enable
 *
 * Sysctl to enable/disable the WoL capability,
 * if supported by the adapter.
 *
 * Values:
 *   0 - disabled
 *   1 - enabled
 ************************************************************************/
static int
ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	int new_wol_enabled;
	int error = 0;

	new_wol_enabled = hw->wol_enabled;
	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	new_wol_enabled = !!(new_wol_enabled);
	if (new_wol_enabled == hw->wol_enabled)
		return (0);

	if (new_wol_enabled > 0 && !sc->wol_support)
		return (ENODEV);
	else
		hw->wol_enabled = new_wol_enabled;

	return (0);
} /* ixgbe_sysctl_wol_enable */

/************************************************************************
 * ixgbe_sysctl_wufc - Wake Up Filter Control
 *
 * Sysctl to enable/disable the types of packets that the
 * adapter will wake up on upon receipt.
 * Flags:
 *   0x1  - Link Status Change
 *   0x2  - Magic Packet
 *   0x4  - Direct Exact
 *   0x8  - Directed Multicast
 *   0x10 - Broadcast
 *   0x20 - ARP/IPv4 Request Packet
 *   0x40 - Direct IPv4 Packet
 *   0x80 - Direct IPv6 Packet
 *
 * Settings not listed above will cause the sysctl to return an error.
 ************************************************************************/
static int
ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	int error = 0;
	u32 new_wufc;

	new_wufc = sc->wufc;

	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (new_wufc == sc->wufc)
		return (0);

	if (new_wufc & 0xffffff00)
		return (EINVAL);

	new_wufc &= 0xff;
	new_wufc |= (0xffffff & sc->wufc);
	sc->wufc = new_wufc;

	return (0);
} /* ixgbe_sysctl_wufc */
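
/*
 * Worked example (illustrative): new_wufc = 0x3 arms wake on Link
 * Status Change and Magic Packet only, while anything with bits above
 * 0x80 set (say 0x100) fails the 0xffffff00 check and returns EINVAL.
 */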
4551
4552 #ifdef IXGBE_DEBUG
4553 /************************************************************************
4554 * ixgbe_sysctl_print_rss_config
4555 ************************************************************************/
4556 static int
ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)4557 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4558 {
4559 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4560 struct ixgbe_hw *hw = &sc->hw;
4561 device_t dev = sc->dev;
4562 struct sbuf *buf;
4563 int error = 0, reta_size;
4564 u32 reg;
4565
4566 if (atomic_load_acq_int(&sc->recovery_mode))
4567 return (EPERM);
4568
4569 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4570 if (!buf) {
4571 device_printf(dev, "Could not allocate sbuf for output.\n");
4572 return (ENOMEM);
4573 }
4574
4575 // TODO: use sbufs to make a string to print out
4576 /* Set multiplier for RETA setup and table size based on MAC */
4577 switch (sc->hw.mac.type) {
4578 case ixgbe_mac_X550:
4579 case ixgbe_mac_X550EM_x:
4580 case ixgbe_mac_X550EM_a:
4581 reta_size = 128;
4582 break;
4583 default:
4584 reta_size = 32;
4585 break;
4586 }
4587
4588 /* Print out the redirection table */
4589 sbuf_cat(buf, "\n");
4590 for (int i = 0; i < reta_size; i++) {
4591 if (i < 32) {
4592 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4593 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4594 } else {
4595 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4596 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4597 }
4598 }
4599
4600 // TODO: print more config
4601
4602 error = sbuf_finish(buf);
4603 if (error)
4604 device_printf(dev, "Error finishing sbuf: %d\n", error);
4605
4606 sbuf_delete(buf);
4607
4608 return (0);
4609 } /* ixgbe_sysctl_print_rss_config */
4610 #endif /* IXGBE_DEBUG */
4611
4612 /************************************************************************
4613 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4614 *
4615 * For X552/X557-AT devices using an external PHY
4616 ************************************************************************/
4617 static int
ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)4618 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4619 {
4620 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4621 struct ixgbe_hw *hw = &sc->hw;
4622 u16 reg;
4623
4624 if (atomic_load_acq_int(&sc->recovery_mode))
4625 return (EPERM);
4626
4627 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4628 device_printf(iflib_get_dev(sc->ctx),
4629 "Device has no supported external thermal sensor.\n");
4630 return (ENODEV);
4631 }
4632
4633 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4634 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
4635 device_printf(iflib_get_dev(sc->ctx),
4636 "Error reading from PHY's current temperature register\n");
4637 return (EAGAIN);
4638 }
4639
4640 /* Shift temp for output */
4641 reg = reg >> 8;
4642
4643 return (sysctl_handle_16(oidp, NULL, reg, req));
4644 } /* ixgbe_sysctl_phy_temp */
4645
4646 /************************************************************************
4647 * ixgbe_sysctl_phy_overtemp_occurred
4648 *
4649 * Reports (directly from the PHY) whether the current PHY
4650 * temperature is over the overtemp threshold.
4651 ************************************************************************/
4652 static int
ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)4653 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4654 {
4655 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4656 struct ixgbe_hw *hw = &sc->hw;
4657 u16 reg;
4658
4659 if (atomic_load_acq_int(&sc->recovery_mode))
4660 return (EPERM);
4661
4662 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4663 device_printf(iflib_get_dev(sc->ctx),
4664 "Device has no supported external thermal sensor.\n");
4665 return (ENODEV);
4666 }
4667
4668 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4669 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
4670 device_printf(iflib_get_dev(sc->ctx),
4671 "Error reading from PHY's temperature status register\n");
4672 return (EAGAIN);
4673 }
4674
4675 /* Get occurrence bit */
4676 reg = !!(reg & 0x4000);
4677
4678 return (sysctl_handle_16(oidp, 0, reg, req));
4679 } /* ixgbe_sysctl_phy_overtemp_occurred */
4680
4681 /************************************************************************
4682 * ixgbe_sysctl_eee_state
4683 *
4684 * Sysctl to set EEE power saving feature
4685 * Values:
4686 * 0 - disable EEE
4687 * 1 - enable EEE
4688 * (none) - get current device EEE state
4689 ************************************************************************/
4690 static int
ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)4691 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4692 {
4693 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4694 device_t dev = sc->dev;
4695 if_t ifp = iflib_get_ifp(sc->ctx);
4696 int curr_eee, new_eee, error = 0;
4697 s32 retval;
4698
4699 if (atomic_load_acq_int(&sc->recovery_mode))
4700 return (EPERM);
4701
4702 curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);
4703
4704 error = sysctl_handle_int(oidp, &new_eee, 0, req);
4705 if ((error) || (req->newptr == NULL))
4706 return (error);
4707
4708 /* Nothing to do */
4709 if (new_eee == curr_eee)
4710 return (0);
4711
4712 /* Not supported */
4713 if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
4714 return (EINVAL);
4715
4716 /* Bounds checking */
4717 if ((new_eee < 0) || (new_eee > 1))
4718 return (EINVAL);
4719
4720 retval = ixgbe_setup_eee(&sc->hw, new_eee);
4721 if (retval) {
4722 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4723 return (EINVAL);
4724 }
4725
4726 /* Restart auto-neg */
4727 if_init(ifp, ifp);
4728
4729 device_printf(dev, "New EEE state: %d\n", new_eee);
4730
4731 /* Cache new value */
4732 if (new_eee)
4733 sc->feat_en |= IXGBE_FEATURE_EEE;
4734 else
4735 sc->feat_en &= ~IXGBE_FEATURE_EEE;
4736
4737 return (error);
4738 } /* ixgbe_sysctl_eee_state */
4739
4740 /************************************************************************
4741 * ixgbe_init_device_features
4742 ************************************************************************/
4743 static void
ixgbe_init_device_features(struct ixgbe_softc * sc)4744 ixgbe_init_device_features(struct ixgbe_softc *sc)
4745 {
4746 sc->feat_cap = IXGBE_FEATURE_NETMAP
4747 | IXGBE_FEATURE_RSS
4748 | IXGBE_FEATURE_MSI
4749 | IXGBE_FEATURE_MSIX
4750 | IXGBE_FEATURE_LEGACY_IRQ;
4751
4752 /* Set capabilities first... */
4753 switch (sc->hw.mac.type) {
4754 case ixgbe_mac_82598EB:
4755 if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
4756 sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4757 break;
4758 case ixgbe_mac_X540:
4759 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4760 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4761 if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4762 (sc->hw.bus.func == 0))
4763 sc->feat_cap |= IXGBE_FEATURE_BYPASS;
4764 break;
4765 case ixgbe_mac_X550:
4766 sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
4767 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4768 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4769 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4770 break;
4771 case ixgbe_mac_X550EM_x:
4772 sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
4773 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4774 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4775 if (sc->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4776 sc->feat_cap |= IXGBE_FEATURE_EEE;
4777 break;
4778 case ixgbe_mac_X550EM_a:
4779 sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
4780 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4781 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4782 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4783 if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4784 (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4785 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4786 sc->feat_cap |= IXGBE_FEATURE_EEE;
4787 }
4788 break;
4789 case ixgbe_mac_82599EB:
4790 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4791 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4792 if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4793 (sc->hw.bus.func == 0))
4794 sc->feat_cap |= IXGBE_FEATURE_BYPASS;
4795 if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4796 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4797 break;
4798 default:
4799 break;
4800 }
4801
4802 /* Enabled by default... */
4803 /* Fan failure detection */
4804 if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4805 sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4806 /* Netmap */
4807 if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
4808 sc->feat_en |= IXGBE_FEATURE_NETMAP;
4809 /* EEE */
4810 if (sc->feat_cap & IXGBE_FEATURE_EEE)
4811 sc->feat_en |= IXGBE_FEATURE_EEE;
4812 /* Thermal Sensor */
4813 if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4814 sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4815 /* Recovery mode */
4816 if (sc->feat_cap & IXGBE_FEATURE_RECOVERY_MODE)
4817 sc->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
4818
4819 /* Enabled via global sysctl... */
4820 /* Flow Director */
4821 if (ixgbe_enable_fdir) {
4822 if (sc->feat_cap & IXGBE_FEATURE_FDIR)
4823 sc->feat_en |= IXGBE_FEATURE_FDIR;
4824 else
4825 device_printf(sc->dev, "Device does not support Flow Director. Leaving disabled.");
4826 }
4827 /*
4828 * Message Signal Interrupts - Extended (MSI-X)
4829 * Normal MSI is only enabled if MSI-X calls fail.
4830 */
4831 if (!ixgbe_enable_msix)
4832 sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
4833 /* Receive-Side Scaling (RSS) */
4834 if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4835 sc->feat_en |= IXGBE_FEATURE_RSS;
4836
4837 /* Disable features with unmet dependencies... */
4838 /* No MSI-X */
4839 if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
4840 sc->feat_cap &= ~IXGBE_FEATURE_RSS;
4841 sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4842 sc->feat_en &= ~IXGBE_FEATURE_RSS;
4843 sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
4844 }
4845 } /* ixgbe_init_device_features */
4846
4847 /************************************************************************
4848 * ixgbe_check_fan_failure
 ************************************************************************/
static void
ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
{
	u32 mask;

	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
	    IXGBE_ESDP_SDP1;

	if (reg & mask)
		device_printf(sc->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
} /* ixgbe_check_fan_failure */

/************************************************************************
 * ixgbe_sbuf_fw_version
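 *
 *   Formats the PHY firmware, NVM, OEM product, Option ROM, and eTrack
 *   version identifiers into the caller-supplied sbuf, omitting any
 *   component that reads back as unsupported or invalid.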
 ************************************************************************/
static void
ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
{
	struct ixgbe_nvm_version nvm_ver = {0};
	const char *space = "";

	ixgbe_get_nvm_version(hw, &nvm_ver); /* NVM version */
	ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
	ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack identifies a build in Intel's SCM */
	ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */

	/* FW version */
	if ((nvm_ver.phy_fw_maj == 0x0 &&
	    nvm_ver.phy_fw_min == 0x0 &&
	    nvm_ver.phy_fw_id == 0x0) ||
	    (nvm_ver.phy_fw_maj == 0xF &&
	    nvm_ver.phy_fw_min == 0xFF &&
	    nvm_ver.phy_fw_id == 0xF)) {
		/* If the major, minor and id numbers are all 0, reading the
		 * FW version is unsupported.  If the major number is 0xF,
		 * the minor is 0xFF and the id is 0xF, the value read back
		 * is invalid.  Print nothing in either case. */
	} else
		sbuf_printf(buf, "fw %d.%d.%d ",
		    nvm_ver.phy_fw_maj, nvm_ver.phy_fw_min, nvm_ver.phy_fw_id);

	/* NVM version */
	if ((nvm_ver.nvm_major == 0x0 &&
	    nvm_ver.nvm_minor == 0x0 &&
	    nvm_ver.nvm_id == 0x0) ||
	    (nvm_ver.nvm_major == 0xF &&
	    nvm_ver.nvm_minor == 0xFF &&
	    nvm_ver.nvm_id == 0xF)) {
		/* Same convention as the FW version above: all zeros means
		 * reading the NVM version is unsupported, and 0xF/0xFF/0xF
		 * means the value read back is invalid.  Print nothing. */
	} else
		sbuf_printf(buf, "nvm %x.%02x.%x ",
		    nvm_ver.nvm_major, nvm_ver.nvm_minor, nvm_ver.nvm_id);

	if (nvm_ver.oem_valid) {
		sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
		    nvm_ver.oem_minor, nvm_ver.oem_release);
		space = " ";
	}

	if (nvm_ver.or_valid) {
		sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
		    space, nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch);
		space = " ";
	}

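	/* Show the eTrack ID only when it differs from the all-ones
	 * pattern that indicates the ID could not be read. */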
	if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
	    NVM_VER_INVALID | 0xFFFFFFFF)) {
		sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
	}
} /* ixgbe_sbuf_fw_version */

/************************************************************************
 * ixgbe_print_fw_version
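 *
 *   Prints the firmware/NVM version string assembled by
 *   ixgbe_sbuf_fw_version() to the kernel message buffer.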
 ************************************************************************/
static void
ixgbe_print_fw_version(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_auto();
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return;
	}

	ixgbe_sbuf_fw_version(hw, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	else if (sbuf_len(buf))
		device_printf(dev, "%s\n", sbuf_data(buf));

	sbuf_delete(buf);
} /* ixgbe_print_fw_version */

/************************************************************************
 * ixgbe_sysctl_print_fw_version
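 *
 *   Sysctl handler that returns the same firmware/NVM version string
 *   to userland via a sysctl-backed sbuf.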
 ************************************************************************/
static int
ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	ixgbe_sbuf_fw_version(hw, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (0);
} /* ixgbe_sysctl_print_fw_version */

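/*
 * Usage sketch: assuming this handler is registered at attach time via
 * SYSCTL_ADD_PROC under a read-only node named "print_fw_version" (the
 * node name here is illustrative), the version string can be read from
 * userland with, e.g.:
 *
 *   sysctl dev.ix.0.print_fw_version
 */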