1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #define BXE_DRIVER_VERSION "1.78.91"
33
34 #include "bxe.h"
35 #include "ecore_sp.h"
36 #include "ecore_init.h"
37 #include "ecore_init_ops.h"
38
39 #include "57710_int_offsets.h"
40 #include "57711_int_offsets.h"
41 #include "57712_int_offsets.h"
42
43 /*
44 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
45 * explicitly here for older kernels that don't include this changeset.
46 */
47 #ifndef CTLTYPE_U64
48 #define CTLTYPE_U64 CTLTYPE_QUAD
49 #define sysctl_handle_64 sysctl_handle_quad
50 #endif
51
52 /*
53 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
54 * here as zero(0) for older kernels that don't include this changeset,
55 * thereby masking the functionality.
56 */
57 #ifndef CSUM_TCP_IPV6
58 #define CSUM_TCP_IPV6 0
59 #define CSUM_UDP_IPV6 0
60 #endif
61
62 #define BXE_DEF_SB_ATT_IDX 0x0001
63 #define BXE_DEF_SB_IDX 0x0002
64
65 /*
66 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
67 * function HW initialization.
68 */
69 #define FLR_WAIT_USEC 10000 /* 10 msecs */
70 #define FLR_WAIT_INTERVAL 50 /* usecs */
71 #define FLR_POLL_CNT (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
72
73 struct pbf_pN_buf_regs {
74 int pN;
75 uint32_t init_crd;
76 uint32_t crd;
77 uint32_t crd_freed;
78 };
79
80 struct pbf_pN_cmd_regs {
81 int pN;
82 uint32_t lines_occup;
83 uint32_t lines_freed;
84 };
85
86 /*
87 * PCI Device ID Table used by bxe_probe().
88 */
89 #define BXE_DEVDESC_MAX 64
90 static struct bxe_device_type bxe_devs[] = {
91 {
92 BRCM_VENDORID,
93 CHIP_NUM_57710,
94 PCI_ANY_ID, PCI_ANY_ID,
95 "QLogic NetXtreme II BCM57710 10GbE"
96 },
97 {
98 BRCM_VENDORID,
99 CHIP_NUM_57711,
100 PCI_ANY_ID, PCI_ANY_ID,
101 "QLogic NetXtreme II BCM57711 10GbE"
102 },
103 {
104 BRCM_VENDORID,
105 CHIP_NUM_57711E,
106 PCI_ANY_ID, PCI_ANY_ID,
107 "QLogic NetXtreme II BCM57711E 10GbE"
108 },
109 {
110 BRCM_VENDORID,
111 CHIP_NUM_57712,
112 PCI_ANY_ID, PCI_ANY_ID,
113 "QLogic NetXtreme II BCM57712 10GbE"
114 },
115 {
116 BRCM_VENDORID,
117 CHIP_NUM_57712_MF,
118 PCI_ANY_ID, PCI_ANY_ID,
119 "QLogic NetXtreme II BCM57712 MF 10GbE"
120 },
121 {
122 BRCM_VENDORID,
123 CHIP_NUM_57800,
124 PCI_ANY_ID, PCI_ANY_ID,
125 "QLogic NetXtreme II BCM57800 10GbE"
126 },
127 {
128 BRCM_VENDORID,
129 CHIP_NUM_57800_MF,
130 PCI_ANY_ID, PCI_ANY_ID,
131 "QLogic NetXtreme II BCM57800 MF 10GbE"
132 },
133 {
134 BRCM_VENDORID,
135 CHIP_NUM_57810,
136 PCI_ANY_ID, PCI_ANY_ID,
137 "QLogic NetXtreme II BCM57810 10GbE"
138 },
139 {
140 BRCM_VENDORID,
141 CHIP_NUM_57810_MF,
142 PCI_ANY_ID, PCI_ANY_ID,
143 "QLogic NetXtreme II BCM57810 MF 10GbE"
144 },
145 {
146 BRCM_VENDORID,
147 CHIP_NUM_57811,
148 PCI_ANY_ID, PCI_ANY_ID,
149 "QLogic NetXtreme II BCM57811 10GbE"
150 },
151 {
152 BRCM_VENDORID,
153 CHIP_NUM_57811_MF,
154 PCI_ANY_ID, PCI_ANY_ID,
155 "QLogic NetXtreme II BCM57811 MF 10GbE"
156 },
157 {
158 BRCM_VENDORID,
159 CHIP_NUM_57840_4_10,
160 PCI_ANY_ID, PCI_ANY_ID,
161 "QLogic NetXtreme II BCM57840 4x10GbE"
162 },
163 {
164 QLOGIC_VENDORID,
165 CHIP_NUM_57840_4_10,
166 PCI_ANY_ID, PCI_ANY_ID,
167 "QLogic NetXtreme II BCM57840 4x10GbE"
168 },
169 {
170 BRCM_VENDORID,
171 CHIP_NUM_57840_2_20,
172 PCI_ANY_ID, PCI_ANY_ID,
173 "QLogic NetXtreme II BCM57840 2x20GbE"
174 },
175 {
176 BRCM_VENDORID,
177 CHIP_NUM_57840_MF,
178 PCI_ANY_ID, PCI_ANY_ID,
179 "QLogic NetXtreme II BCM57840 MF 10GbE"
180 },
181 {
182 0, 0, 0, 0, NULL
183 }
184 };
185
186 MALLOC_DECLARE(M_BXE_ILT);
187 MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");
188
189 /*
190 * FreeBSD device entry points.
191 */
192 static int bxe_probe(device_t);
193 static int bxe_attach(device_t);
194 static int bxe_detach(device_t);
195 static int bxe_shutdown(device_t);
196
197
198 /*
199 * FreeBSD KLD module/device interface event handler method.
200 */
201 static device_method_t bxe_methods[] = {
202 /* Device interface (device_if.h) */
203 DEVMETHOD(device_probe, bxe_probe),
204 DEVMETHOD(device_attach, bxe_attach),
205 DEVMETHOD(device_detach, bxe_detach),
206 DEVMETHOD(device_shutdown, bxe_shutdown),
207 /* Bus interface (bus_if.h) */
208 DEVMETHOD(bus_print_child, bus_generic_print_child),
209 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
210 KOBJMETHOD_END
211 };
212
213 /*
214 * FreeBSD KLD Module data declaration
215 */
216 static driver_t bxe_driver = {
217 "bxe", /* module name */
218 bxe_methods, /* event handler */
219 sizeof(struct bxe_softc) /* extra data */
220 };
221
222 /*
223 * FreeBSD dev class is needed to manage dev instances and
224 * to associate with a bus type
225 */
226 static devclass_t bxe_devclass;
227
228 MODULE_DEPEND(bxe, pci, 1, 1, 1);
229 MODULE_DEPEND(bxe, ether, 1, 1, 1);
230 DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
231
232 DEBUGNET_DEFINE(bxe);
233
234 /* resources needed for unloading a previously loaded device */
235
236 #define BXE_PREV_WAIT_NEEDED 1
237 struct mtx bxe_prev_mtx;
238 MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
239 struct bxe_prev_list_node {
240 LIST_ENTRY(bxe_prev_list_node) node;
241 uint8_t bus;
242 uint8_t slot;
243 uint8_t path;
244 uint8_t aer; /* XXX automatic error recovery */
245 uint8_t undi;
246 };
247 static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);
248
249 static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
250
251 /* Tunable device values... */
252
253 SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
254 "bxe driver parameters");
255
256 /* Debug */
257 unsigned long bxe_debug = 0;
258 SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
259 &bxe_debug, 0, "Debug logging mode");
260
261 /* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
262 static int bxe_interrupt_mode = INTR_MODE_MSIX;
263 SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
264 &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
265
266 /* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
267 static int bxe_queue_count = 4;
268 SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
269 &bxe_queue_count, 0, "Multi-Queue queue count");
270
271 /* max number of buffers per queue (default RX_BD_USABLE) */
272 static int bxe_max_rx_bufs = 0;
273 SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
274 &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");
275
276 /* Host interrupt coalescing RX tick timer (usecs) */
277 static int bxe_hc_rx_ticks = 25;
278 SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
279 &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");
280
281 /* Host interrupt coalescing TX tick timer (usecs) */
282 static int bxe_hc_tx_ticks = 50;
283 SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
284 &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");
285
286 /* Maximum number of Rx packets to process at a time */
287 static int bxe_rx_budget = 0xffffffff;
288 SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
289 &bxe_rx_budget, 0, "Rx processing budget");
290
291 /* Maximum LRO aggregation size */
292 static int bxe_max_aggregation_size = 0;
293 SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
294 &bxe_max_aggregation_size, 0, "max aggregation size");
295
296 /* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
297 static int bxe_mrrs = -1;
298 SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
299 &bxe_mrrs, 0, "PCIe maximum read request size");
300
301 /* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
302 static int bxe_autogreeen = 0;
303 SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
304 &bxe_autogreeen, 0, "AutoGrEEEn support");
305
306 /* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
307 static int bxe_udp_rss = 0;
308 SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
309 &bxe_udp_rss, 0, "UDP RSS support");
310
311
312 #define STAT_NAME_LEN 32 /* no stat names below can be longer than this */
313
314 #define STATS_OFFSET32(stat_name) \
315 (offsetof(struct bxe_eth_stats, stat_name) / 4)
316
317 #define Q_STATS_OFFSET32(stat_name) \
318 (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
319
320 static const struct {
321 uint32_t offset;
322 uint32_t size;
323 uint32_t flags;
324 #define STATS_FLAGS_PORT 1
325 #define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */
326 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
327 char string[STAT_NAME_LEN];
328 } bxe_eth_stats_arr[] = {
329 { STATS_OFFSET32(total_bytes_received_hi),
330 8, STATS_FLAGS_BOTH, "rx_bytes" },
331 { STATS_OFFSET32(error_bytes_received_hi),
332 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
333 { STATS_OFFSET32(total_unicast_packets_received_hi),
334 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
335 { STATS_OFFSET32(total_multicast_packets_received_hi),
336 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
337 { STATS_OFFSET32(total_broadcast_packets_received_hi),
338 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
339 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
340 8, STATS_FLAGS_PORT, "rx_crc_errors" },
341 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
342 8, STATS_FLAGS_PORT, "rx_align_errors" },
343 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
344 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
345 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
346 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
347 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
348 8, STATS_FLAGS_PORT, "rx_fragments" },
349 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
350 8, STATS_FLAGS_PORT, "rx_jabbers" },
351 { STATS_OFFSET32(no_buff_discard_hi),
352 8, STATS_FLAGS_BOTH, "rx_discards" },
353 { STATS_OFFSET32(mac_filter_discard),
354 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
355 { STATS_OFFSET32(mf_tag_discard),
356 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
357 { STATS_OFFSET32(pfc_frames_received_hi),
358 8, STATS_FLAGS_PORT, "pfc_frames_received" },
359 { STATS_OFFSET32(pfc_frames_sent_hi),
360 8, STATS_FLAGS_PORT, "pfc_frames_sent" },
361 { STATS_OFFSET32(brb_drop_hi),
362 8, STATS_FLAGS_PORT, "rx_brb_discard" },
363 { STATS_OFFSET32(brb_truncate_hi),
364 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
365 { STATS_OFFSET32(pause_frames_received_hi),
366 8, STATS_FLAGS_PORT, "rx_pause_frames" },
367 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
368 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
369 { STATS_OFFSET32(nig_timer_max),
370 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
371 { STATS_OFFSET32(total_bytes_transmitted_hi),
372 8, STATS_FLAGS_BOTH, "tx_bytes" },
373 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
374 8, STATS_FLAGS_PORT, "tx_error_bytes" },
375 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
376 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
377 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
378 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
379 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
380 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
381 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
382 8, STATS_FLAGS_PORT, "tx_mac_errors" },
383 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
384 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
385 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
386 8, STATS_FLAGS_PORT, "tx_single_collisions" },
387 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
388 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
389 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
390 8, STATS_FLAGS_PORT, "tx_deferred" },
391 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
392 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
393 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
394 8, STATS_FLAGS_PORT, "tx_late_collisions" },
395 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
396 8, STATS_FLAGS_PORT, "tx_total_collisions" },
397 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
398 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
399 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
400 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
401 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
402 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
403 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
404 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
405 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
406 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
407 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
408 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
409 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
410 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
411 { STATS_OFFSET32(pause_frames_sent_hi),
412 8, STATS_FLAGS_PORT, "tx_pause_frames" },
413 { STATS_OFFSET32(total_tpa_aggregations_hi),
414 8, STATS_FLAGS_FUNC, "tpa_aggregations" },
415 { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
416 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
417 { STATS_OFFSET32(total_tpa_bytes_hi),
418 8, STATS_FLAGS_FUNC, "tpa_bytes"},
419 { STATS_OFFSET32(eee_tx_lpi),
420 4, STATS_FLAGS_PORT, "eee_tx_lpi"},
421 { STATS_OFFSET32(rx_calls),
422 4, STATS_FLAGS_FUNC, "rx_calls"},
423 { STATS_OFFSET32(rx_pkts),
424 4, STATS_FLAGS_FUNC, "rx_pkts"},
425 { STATS_OFFSET32(rx_tpa_pkts),
426 4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
427 { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
428 4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
429 { STATS_OFFSET32(rx_bxe_service_rxsgl),
430 4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
431 { STATS_OFFSET32(rx_jumbo_sge_pkts),
432 4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
433 { STATS_OFFSET32(rx_soft_errors),
434 4, STATS_FLAGS_FUNC, "rx_soft_errors"},
435 { STATS_OFFSET32(rx_hw_csum_errors),
436 4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
437 { STATS_OFFSET32(rx_ofld_frames_csum_ip),
438 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
439 { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
440 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
441 { STATS_OFFSET32(rx_budget_reached),
442 4, STATS_FLAGS_FUNC, "rx_budget_reached"},
443 { STATS_OFFSET32(tx_pkts),
444 4, STATS_FLAGS_FUNC, "tx_pkts"},
445 { STATS_OFFSET32(tx_soft_errors),
446 4, STATS_FLAGS_FUNC, "tx_soft_errors"},
447 { STATS_OFFSET32(tx_ofld_frames_csum_ip),
448 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
449 { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
450 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
451 { STATS_OFFSET32(tx_ofld_frames_csum_udp),
452 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
453 { STATS_OFFSET32(tx_ofld_frames_lso),
454 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
455 { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
456 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
457 { STATS_OFFSET32(tx_encap_failures),
458 4, STATS_FLAGS_FUNC, "tx_encap_failures"},
459 { STATS_OFFSET32(tx_hw_queue_full),
460 4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
461 { STATS_OFFSET32(tx_hw_max_queue_depth),
462 4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
463 { STATS_OFFSET32(tx_dma_mapping_failure),
464 4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
465 { STATS_OFFSET32(tx_max_drbr_queue_depth),
466 4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
467 { STATS_OFFSET32(tx_window_violation_std),
468 4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
469 { STATS_OFFSET32(tx_window_violation_tso),
470 4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
471 { STATS_OFFSET32(tx_chain_lost_mbuf),
472 4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
473 { STATS_OFFSET32(tx_frames_deferred),
474 4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
475 { STATS_OFFSET32(tx_queue_xoff),
476 4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
477 { STATS_OFFSET32(mbuf_defrag_attempts),
478 4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
479 { STATS_OFFSET32(mbuf_defrag_failures),
480 4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
481 { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
482 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
483 { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
484 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
485 { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
486 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
487 { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
488 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
489 { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
490 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
491 { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
492 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
493 { STATS_OFFSET32(mbuf_alloc_tx),
494 4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
495 { STATS_OFFSET32(mbuf_alloc_rx),
496 4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
497 { STATS_OFFSET32(mbuf_alloc_sge),
498 4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
499 { STATS_OFFSET32(mbuf_alloc_tpa),
500 4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
501 { STATS_OFFSET32(tx_queue_full_return),
502 4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
503 { STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
504 4, STATS_FLAGS_FUNC, "bxe_tx_mq_sc_state_failures"},
505 { STATS_OFFSET32(tx_request_link_down_failures),
506 4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
507 { STATS_OFFSET32(bd_avail_too_less_failures),
508 4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
509 { STATS_OFFSET32(tx_mq_not_empty),
510 4, STATS_FLAGS_FUNC, "tx_mq_not_empty"},
511 { STATS_OFFSET32(nsegs_path1_errors),
512 4, STATS_FLAGS_FUNC, "nsegs_path1_errors"},
513 { STATS_OFFSET32(nsegs_path2_errors),
514 4, STATS_FLAGS_FUNC, "nsegs_path2_errors"}
515
516
517 };
518
519 static const struct {
520 uint32_t offset;
521 uint32_t size;
522 char string[STAT_NAME_LEN];
523 } bxe_eth_q_stats_arr[] = {
524 { Q_STATS_OFFSET32(total_bytes_received_hi),
525 8, "rx_bytes" },
526 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
527 8, "rx_ucast_packets" },
528 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
529 8, "rx_mcast_packets" },
530 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
531 8, "rx_bcast_packets" },
532 { Q_STATS_OFFSET32(no_buff_discard_hi),
533 8, "rx_discards" },
534 { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
535 8, "tx_bytes" },
536 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
537 8, "tx_ucast_packets" },
538 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
539 8, "tx_mcast_packets" },
540 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
541 8, "tx_bcast_packets" },
542 { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
543 8, "tpa_aggregations" },
544 { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
545 8, "tpa_aggregated_frames"},
546 { Q_STATS_OFFSET32(total_tpa_bytes_hi),
547 8, "tpa_bytes"},
548 { Q_STATS_OFFSET32(rx_calls),
549 4, "rx_calls"},
550 { Q_STATS_OFFSET32(rx_pkts),
551 4, "rx_pkts"},
552 { Q_STATS_OFFSET32(rx_tpa_pkts),
553 4, "rx_tpa_pkts"},
554 { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
555 4, "rx_erroneous_jumbo_sge_pkts"},
556 { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
557 4, "rx_bxe_service_rxsgl"},
558 { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
559 4, "rx_jumbo_sge_pkts"},
560 { Q_STATS_OFFSET32(rx_soft_errors),
561 4, "rx_soft_errors"},
562 { Q_STATS_OFFSET32(rx_hw_csum_errors),
563 4, "rx_hw_csum_errors"},
564 { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
565 4, "rx_ofld_frames_csum_ip"},
566 { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
567 4, "rx_ofld_frames_csum_tcp_udp"},
568 { Q_STATS_OFFSET32(rx_budget_reached),
569 4, "rx_budget_reached"},
570 { Q_STATS_OFFSET32(tx_pkts),
571 4, "tx_pkts"},
572 { Q_STATS_OFFSET32(tx_soft_errors),
573 4, "tx_soft_errors"},
574 { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
575 4, "tx_ofld_frames_csum_ip"},
576 { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
577 4, "tx_ofld_frames_csum_tcp"},
578 { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
579 4, "tx_ofld_frames_csum_udp"},
580 { Q_STATS_OFFSET32(tx_ofld_frames_lso),
581 4, "tx_ofld_frames_lso"},
582 { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
583 4, "tx_ofld_frames_lso_hdr_splits"},
584 { Q_STATS_OFFSET32(tx_encap_failures),
585 4, "tx_encap_failures"},
586 { Q_STATS_OFFSET32(tx_hw_queue_full),
587 4, "tx_hw_queue_full"},
588 { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
589 4, "tx_hw_max_queue_depth"},
590 { Q_STATS_OFFSET32(tx_dma_mapping_failure),
591 4, "tx_dma_mapping_failure"},
592 { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
593 4, "tx_max_drbr_queue_depth"},
594 { Q_STATS_OFFSET32(tx_window_violation_std),
595 4, "tx_window_violation_std"},
596 { Q_STATS_OFFSET32(tx_window_violation_tso),
597 4, "tx_window_violation_tso"},
598 { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
599 4, "tx_chain_lost_mbuf"},
600 { Q_STATS_OFFSET32(tx_frames_deferred),
601 4, "tx_frames_deferred"},
602 { Q_STATS_OFFSET32(tx_queue_xoff),
603 4, "tx_queue_xoff"},
604 { Q_STATS_OFFSET32(mbuf_defrag_attempts),
605 4, "mbuf_defrag_attempts"},
606 { Q_STATS_OFFSET32(mbuf_defrag_failures),
607 4, "mbuf_defrag_failures"},
608 { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
609 4, "mbuf_rx_bd_alloc_failed"},
610 { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
611 4, "mbuf_rx_bd_mapping_failed"},
612 { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
613 4, "mbuf_rx_tpa_alloc_failed"},
614 { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
615 4, "mbuf_rx_tpa_mapping_failed"},
616 { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
617 4, "mbuf_rx_sge_alloc_failed"},
618 { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
619 4, "mbuf_rx_sge_mapping_failed"},
620 { Q_STATS_OFFSET32(mbuf_alloc_tx),
621 4, "mbuf_alloc_tx"},
622 { Q_STATS_OFFSET32(mbuf_alloc_rx),
623 4, "mbuf_alloc_rx"},
624 { Q_STATS_OFFSET32(mbuf_alloc_sge),
625 4, "mbuf_alloc_sge"},
626 { Q_STATS_OFFSET32(mbuf_alloc_tpa),
627 4, "mbuf_alloc_tpa"},
628 { Q_STATS_OFFSET32(tx_queue_full_return),
629 4, "tx_queue_full_return"},
630 { Q_STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
631 4, "bxe_tx_mq_sc_state_failures"},
632 { Q_STATS_OFFSET32(tx_request_link_down_failures),
633 4, "tx_request_link_down_failures"},
634 { Q_STATS_OFFSET32(bd_avail_too_less_failures),
635 4, "bd_avail_too_less_failures"},
636 { Q_STATS_OFFSET32(tx_mq_not_empty),
637 4, "tx_mq_not_empty"},
638 { Q_STATS_OFFSET32(nsegs_path1_errors),
639 4, "nsegs_path1_errors"},
640 { Q_STATS_OFFSET32(nsegs_path2_errors),
641 4, "nsegs_path2_errors"}
642
643
644 };
645
646 #define BXE_NUM_ETH_STATS ARRAY_SIZE(bxe_eth_stats_arr)
647 #define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
648
649
650 static void bxe_cmng_fns_init(struct bxe_softc *sc,
651 uint8_t read_cfg,
652 uint8_t cmng_type);
653 static int bxe_get_cmng_fns_mode(struct bxe_softc *sc);
654 static void storm_memset_cmng(struct bxe_softc *sc,
655 struct cmng_init *cmng,
656 uint8_t port);
657 static void bxe_set_reset_global(struct bxe_softc *sc);
658 static void bxe_set_reset_in_progress(struct bxe_softc *sc);
659 static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
660 int engine);
661 static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
662 static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
663 uint8_t *global,
664 uint8_t print);
665 static void bxe_int_disable(struct bxe_softc *sc);
666 static int bxe_release_leader_lock(struct bxe_softc *sc);
667 static void bxe_pf_disable(struct bxe_softc *sc);
668 static void bxe_free_fp_buffers(struct bxe_softc *sc);
669 static inline void bxe_update_rx_prod(struct bxe_softc *sc,
670 struct bxe_fastpath *fp,
671 uint16_t rx_bd_prod,
672 uint16_t rx_cq_prod,
673 uint16_t rx_sge_prod);
674 static void bxe_link_report_locked(struct bxe_softc *sc);
675 static void bxe_link_report(struct bxe_softc *sc);
676 static void bxe_link_status_update(struct bxe_softc *sc);
677 static void bxe_periodic_callout_func(void *xsc);
678 static void bxe_periodic_start(struct bxe_softc *sc);
679 static void bxe_periodic_stop(struct bxe_softc *sc);
680 static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
681 uint16_t prev_index,
682 uint16_t index);
683 static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
684 int queue);
685 static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
686 uint16_t index);
687 static uint8_t bxe_txeof(struct bxe_softc *sc,
688 struct bxe_fastpath *fp);
689 static void bxe_task_fp(struct bxe_fastpath *fp);
690 static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
691 struct mbuf *m,
692 uint8_t contents);
693 static int bxe_alloc_mem(struct bxe_softc *sc);
694 static void bxe_free_mem(struct bxe_softc *sc);
695 static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
696 static void bxe_free_fw_stats_mem(struct bxe_softc *sc);
697 static int bxe_interrupt_attach(struct bxe_softc *sc);
698 static void bxe_interrupt_detach(struct bxe_softc *sc);
699 static void bxe_set_rx_mode(struct bxe_softc *sc);
700 static int bxe_init_locked(struct bxe_softc *sc);
701 static int bxe_stop_locked(struct bxe_softc *sc);
702 static void bxe_sp_err_timeout_task(void *arg, int pending);
703 void bxe_parity_recover(struct bxe_softc *sc);
704 void bxe_handle_error(struct bxe_softc *sc);
705 static __noinline int bxe_nic_load(struct bxe_softc *sc,
706 int load_mode);
707 static __noinline int bxe_nic_unload(struct bxe_softc *sc,
708 uint32_t unload_mode,
709 uint8_t keep_link);
710
711 static void bxe_handle_sp_tq(void *context, int pending);
712 static void bxe_handle_fp_tq(void *context, int pending);
713
714 static int bxe_add_cdev(struct bxe_softc *sc);
715 static void bxe_del_cdev(struct bxe_softc *sc);
716 int bxe_grc_dump(struct bxe_softc *sc);
717 static int bxe_alloc_buf_rings(struct bxe_softc *sc);
718 static void bxe_free_buf_rings(struct bxe_softc *sc);
719
720 /* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
721 uint32_t
722 calc_crc32(uint8_t *crc32_packet,
723 uint32_t crc32_length,
724 uint32_t crc32_seed,
725 uint8_t complement)
726 {
727 uint32_t byte = 0;
728 uint32_t bit = 0;
729 uint8_t msb = 0;
730 uint32_t temp = 0;
731 uint32_t shft = 0;
732 uint8_t current_byte = 0;
733 uint32_t crc32_result = crc32_seed;
734 const uint32_t CRC32_POLY = 0x1edc6f41;
735
736 if ((crc32_packet == NULL) ||
737 (crc32_length == 0) ||
738 ((crc32_length % 8) != 0))
739 {
740 return (crc32_result);
741 }
742
743 for (byte = 0; byte < crc32_length; byte = byte + 1)
744 {
745 current_byte = crc32_packet[byte];
746 for (bit = 0; bit < 8; bit = bit + 1)
747 {
748 /* msb = crc32_result[31]; */
749 msb = (uint8_t)(crc32_result >> 31);
750
751 crc32_result = crc32_result << 1;
752
753 /* if (msb != current_byte[bit]) */
754 if (msb != (0x1 & (current_byte >> bit)))
755 {
756 crc32_result = crc32_result ^ CRC32_POLY;
757 /* crc32_result[0] = 1 */
758 crc32_result |= 1;
759 }
760 }
761 }
762
763 /* Last step is to:
764 * 1. "mirror" every bit
765 * 2. swap the 4 bytes
766 * 3. complement each bit
767 */
768
769 /* Mirror */
770 temp = crc32_result;
771 shft = sizeof(crc32_result) * 8 - 1;
772
773 for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
774 {
775 temp <<= 1;
776 temp |= crc32_result & 1;
777 shft--;
778 }
779
780 /* temp[31-bit] = crc32_result[bit] */
781 temp <<= shft;
782
783 /* Swap */
784 /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
785 {
786 uint32_t t0, t1, t2, t3;
787 t0 = (0x000000ff & (temp >> 24));
788 t1 = (0x0000ff00 & (temp >> 8));
789 t2 = (0x00ff0000 & (temp << 8));
790 t3 = (0xff000000 & (temp << 24));
791 crc32_result = t0 | t1 | t2 | t3;
792 }
793
794 /* Complement */
795 if (complement)
796 {
797 crc32_result = ~crc32_result;
798 }
799
800 return (crc32_result);
801 }
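/*
 * Note: 0x1edc6f41 is the CRC-32C (Castagnoli) polynomial and the loop above
 * is a plain bit-serial implementation; the mirror/swap/complement steps then
 * produce the reflected, byte-swapped result. An illustrative call (buffer
 * contents are made up, only the 8-byte-multiple length matters):
 *
 *     uint8_t buf[8] = { 0 };
 *     uint32_t crc = calc_crc32(buf, sizeof(buf), 0xffffffff, 1);
 */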
802
803 int
804 bxe_test_bit(int nr,
805 volatile unsigned long *addr)
806 {
807 return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
808 }
809
810 void
811 bxe_set_bit(unsigned int nr,
812 volatile unsigned long *addr)
813 {
814 atomic_set_acq_long(addr, (1 << nr));
815 }
816
817 void
818 bxe_clear_bit(int nr,
819 volatile unsigned long *addr)
820 {
821 atomic_clear_acq_long(addr, (1 << nr));
822 }
823
824 int
825 bxe_test_and_set_bit(int nr,
826 volatile unsigned long *addr)
827 {
828 unsigned long x;
829 nr = (1 << nr);
830 do {
831 x = *addr;
832 } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
833 // if (x & nr) bit_was_set; else bit_was_not_set;
834 return (x & nr);
835 }
836
837 int
838 bxe_test_and_clear_bit(int nr,
839 volatile unsigned long *addr)
840 {
841 unsigned long x;
842 nr = (1 << nr);
843 do {
844 x = *addr;
845 } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
846 // if (x & nr) bit_was_set; else bit_was_not_set;
847 return (x & nr);
848 }
849
850 int
851 bxe_cmpxchg(volatile int *addr,
852 int old,
853 int new)
854 {
855 int x;
856 do {
857 x = *addr;
858 } while (atomic_cmpset_acq_int(addr, old, new) == 0);
859 return (x);
860 }
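/*
 * The helpers above emulate Linux-style bit and cmpxchg primitives on top of
 * FreeBSD's atomic(9) operations so that shared ecore/elink code can keep a
 * familiar interface. A hypothetical claim-once pattern (the bit and field
 * names below are placeholders, not real bxe_softc members):
 *
 *     if (bxe_test_and_set_bit(EXAMPLE_FLAG_BIT, &sc->example_flags) == 0) {
 *         ... bit was previously clear, this caller owns the work ...
 *     }
 */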
861
862 /*
863 * Get DMA memory from the OS.
864 *
865 * Validates that the OS has provided DMA buffers in response to a
866 * bus_dmamap_load call and saves the physical address of those buffers.
867 * When the callback is used the OS will return 0 from the mapping function
868 * (bus_dmamap_load), so the physical address and segment count saved here
869 * (and cleared on error) are used to report any failures back to the caller.
870 *
871 * Returns:
872 * Nothing.
873 */
874 static void
875 bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
876 {
877 struct bxe_dma *dma = arg;
878
879 if (error) {
880 dma->paddr = 0;
881 dma->nseg = 0;
882 BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
883 } else {
884 dma->paddr = segs->ds_addr;
885 dma->nseg = nseg;
886 }
887 }
888
889 /*
890 * Allocate a block of memory and map it for DMA. No partial completions
891 * are allowed; if all resources cannot be acquired then any resources that
892 * were acquired are released.
893 *
894 * Returns:
895 * 0 = Success, !0 = Failure
896 */
897 int
898 bxe_dma_alloc(struct bxe_softc *sc,
899 bus_size_t size,
900 struct bxe_dma *dma,
901 const char *msg)
902 {
903 int rc;
904
905 if (dma->size > 0) {
906 BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
907 (unsigned long)dma->size);
908 return (1);
909 }
910
911 memset(dma, 0, sizeof(*dma)); /* sanity */
912 dma->sc = sc;
913 dma->size = size;
914 snprintf(dma->msg, sizeof(dma->msg), "%s", msg);
915
916 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
917 BCM_PAGE_SIZE, /* alignment */
918 0, /* boundary limit */
919 BUS_SPACE_MAXADDR, /* restricted low */
920 BUS_SPACE_MAXADDR, /* restricted hi */
921 NULL, /* addr filter() */
922 NULL, /* addr filter() arg */
923 size, /* max map size */
924 1, /* num discontinuous */
925 size, /* max seg size */
926 BUS_DMA_ALLOCNOW, /* flags */
927 NULL, /* lock() */
928 NULL, /* lock() arg */
929 &dma->tag); /* returned dma tag */
930 if (rc != 0) {
931 BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
932 memset(dma, 0, sizeof(*dma));
933 return (1);
934 }
935
936 rc = bus_dmamem_alloc(dma->tag,
937 (void **)&dma->vaddr,
938 (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
939 &dma->map);
940 if (rc != 0) {
941 BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
942 bus_dma_tag_destroy(dma->tag);
943 memset(dma, 0, sizeof(*dma));
944 return (1);
945 }
946
947 rc = bus_dmamap_load(dma->tag,
948 dma->map,
949 dma->vaddr,
950 size,
951 bxe_dma_map_addr, /* BLOGD in here */
952 dma,
953 BUS_DMA_NOWAIT);
954 if (rc != 0) {
955 BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
956 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
957 bus_dma_tag_destroy(dma->tag);
958 memset(dma, 0, sizeof(*dma));
959 return (1);
960 }
961
962 return (0);
963 }
964
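/*
 * A hypothetical pairing of bxe_dma_alloc()/bxe_dma_free(): allocate a
 * coherent block at load time and release it on teardown. The size and tag
 * string here are examples only; the descriptor must start out zeroed since
 * bxe_dma_alloc() rejects one with a non-zero size:
 *
 *     struct bxe_dma blk = { 0 };
 *     if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &blk, "example block") != 0)
 *         return (ENOMEM);
 *     ...
 *     bxe_dma_free(sc, &blk);
 *
 * bxe_dma_free() (below) is safe on a zeroed descriptor because it checks
 * dma->size before touching the tag/map.
 */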
965 void
966 bxe_dma_free(struct bxe_softc *sc,
967 struct bxe_dma *dma)
968 {
969 if (dma->size > 0) {
970 DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));
971
972 bus_dmamap_sync(dma->tag, dma->map,
973 (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
974 bus_dmamap_unload(dma->tag, dma->map);
975 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
976 bus_dma_tag_destroy(dma->tag);
977 }
978
979 memset(dma, 0, sizeof(*dma));
980 }
981
982 /*
983 * These indirect read and write routines are only used during init.
984 * The locking is handled by the MCP.
985 */
986
987 void
988 bxe_reg_wr_ind(struct bxe_softc *sc,
989 uint32_t addr,
990 uint32_t val)
991 {
992 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
993 pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
994 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
995 }
996
997 uint32_t
998 bxe_reg_rd_ind(struct bxe_softc *sc,
999 uint32_t addr)
1000 {
1001 uint32_t val;
1002
1003 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
1004 val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
1005 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
1006
1007 return (val);
1008 }
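/*
 * Both indirect accessors above use the GRC window in PCI config space: the
 * target GRC address is written to PCICFG_GRC_ADDRESS, the data moves through
 * PCICFG_GRC_DATA, and the window is cleared again by writing 0 to the
 * address register. This presumably lets early init code touch GRC registers
 * without relying on the memory-mapped BARs.
 */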
1009
1010 static int
1011 bxe_acquire_hw_lock(struct bxe_softc *sc,
1012 uint32_t resource)
1013 {
1014 uint32_t lock_status;
1015 uint32_t resource_bit = (1 << resource);
1016 int func = SC_FUNC(sc);
1017 uint32_t hw_lock_control_reg;
1018 int cnt;
1019
1020 /* validate the resource is within range */
1021 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1022 BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1023 " resource_bit 0x%x\n", resource, resource_bit);
1024 return (-1);
1025 }
1026
1027 if (func <= 5) {
1028 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1029 } else {
1030 hw_lock_control_reg =
1031 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1032 }
1033
1034 /* validate the resource is not already taken */
1035 lock_status = REG_RD(sc, hw_lock_control_reg);
1036 if (lock_status & resource_bit) {
1037 BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
1038 resource, lock_status, resource_bit);
1039 return (-1);
1040 }
1041
1042 /* try every 5ms for 5 seconds */
1043 for (cnt = 0; cnt < 1000; cnt++) {
1044 REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
1045 lock_status = REG_RD(sc, hw_lock_control_reg);
1046 if (lock_status & resource_bit) {
1047 return (0);
1048 }
1049 DELAY(5000);
1050 }
1051
1052 BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
1053 resource, resource_bit);
1054 return (-1);
1055 }
1056
1057 static int
1058 bxe_release_hw_lock(struct bxe_softc *sc,
1059 uint32_t resource)
1060 {
1061 uint32_t lock_status;
1062 uint32_t resource_bit = (1 << resource);
1063 int func = SC_FUNC(sc);
1064 uint32_t hw_lock_control_reg;
1065
1066 /* validate the resource is within range */
1067 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1068 BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1069 " resource_bit 0x%x\n", resource, resource_bit);
1070 return (-1);
1071 }
1072
1073 if (func <= 5) {
1074 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1075 } else {
1076 hw_lock_control_reg =
1077 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1078 }
1079
1080 /* validate the resource is currently taken */
1081 lock_status = REG_RD(sc, hw_lock_control_reg);
1082 if (!(lock_status & resource_bit)) {
1083 BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
1084 resource, lock_status, resource_bit);
1085 return (-1);
1086 }
1087
1088 REG_WR(sc, hw_lock_control_reg, resource_bit);
1089 return (0);
1090 }
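/*
 * Register usage note (inferred from the code above, not from documentation):
 * acquisition writes the resource bit to (hw_lock_control_reg + 4) and then
 * reads the base register back to see whether the bit stuck, while release
 * writes the bit to the base register directly; the +4 offset appears to be
 * the hardware's "set" companion of the lock control register.
 */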
1091 static void bxe_acquire_phy_lock(struct bxe_softc *sc)
1092 {
1093 BXE_PHY_LOCK(sc);
1094 bxe_acquire_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1095 }
1096
1097 static void bxe_release_phy_lock(struct bxe_softc *sc)
1098 {
1099 bxe_release_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1100 BXE_PHY_UNLOCK(sc);
1101 }
1102 /*
1103 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
1104 * had we done things the other way around, if two pfs from the same port
1105 * were to attempt to access nvram at the same time, we could run into a
1106 * scenario such as:
1107 * pf A takes the port lock.
1108 * pf B succeeds in taking the same lock since they are from the same port.
1109 * pf A takes the per pf misc lock. Performs eeprom access.
1110 * pf A finishes. Unlocks the per pf misc lock.
1111 * pf B takes the lock and proceeds to perform its own access.
1112 * pf A unlocks the per port lock, while pf B is still working (!).
1113 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
1114 * access corrupted by pf B).
1115 */
1116 static int
1117 bxe_acquire_nvram_lock(struct bxe_softc *sc)
1118 {
1119 int port = SC_PORT(sc);
1120 int count, i;
1121 uint32_t val = 0;
1122
1123 /* acquire HW lock: protect against other PFs in PF Direct Assignment */
1124 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1125
1126 /* adjust timeout for emulation/FPGA */
1127 count = NVRAM_TIMEOUT_COUNT;
1128 if (CHIP_REV_IS_SLOW(sc)) {
1129 count *= 100;
1130 }
1131
1132 /* request access to nvram interface */
1133 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1134 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
1135
1136 for (i = 0; i < count*10; i++) {
1137 val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1138 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1139 break;
1140 }
1141
1142 DELAY(5);
1143 }
1144
1145 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1146 BLOGE(sc, "Cannot get access to nvram interface "
1147 "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1148 port, val);
1149 return (-1);
1150 }
1151
1152 return (0);
1153 }
1154
1155 static int
1156 bxe_release_nvram_lock(struct bxe_softc *sc)
1157 {
1158 int port = SC_PORT(sc);
1159 int count, i;
1160 uint32_t val = 0;
1161
1162 /* adjust timeout for emulation/FPGA */
1163 count = NVRAM_TIMEOUT_COUNT;
1164 if (CHIP_REV_IS_SLOW(sc)) {
1165 count *= 100;
1166 }
1167
1168 /* relinquish nvram interface */
1169 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1170 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
1171
1172 for (i = 0; i < count*10; i++) {
1173 val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1174 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1175 break;
1176 }
1177
1178 DELAY(5);
1179 }
1180
1181 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1182 BLOGE(sc, "Cannot free access to nvram interface "
1183 "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1184 port, val);
1185 return (-1);
1186 }
1187
1188 /* release HW lock: protect against other PFs in PF Direct Assignment */
1189 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1190
1191 return (0);
1192 }
1193
1194 static void
1195 bxe_enable_nvram_access(struct bxe_softc *sc)
1196 {
1197 uint32_t val;
1198
1199 val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1200
1201 /* enable both bits, even on read */
1202 REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1203 (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
1204 }
1205
1206 static void
1207 bxe_disable_nvram_access(struct bxe_softc *sc)
1208 {
1209 uint32_t val;
1210
1211 val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1212
1213 /* disable both bits, even after read */
1214 REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1215 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
1216 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
1217 }
1218
1219 static int
1220 bxe_nvram_read_dword(struct bxe_softc *sc,
1221 uint32_t offset,
1222 uint32_t *ret_val,
1223 uint32_t cmd_flags)
1224 {
1225 int count, i, rc;
1226 uint32_t val;
1227
1228 /* build the command word */
1229 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
1230
1231 /* need to clear DONE bit separately */
1232 REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1233
1234 /* address of the NVRAM to read from */
1235 REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1236 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1237
1238 /* issue a read command */
1239 REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1240
1241 /* adjust timeout for emulation/FPGA */
1242 count = NVRAM_TIMEOUT_COUNT;
1243 if (CHIP_REV_IS_SLOW(sc)) {
1244 count *= 100;
1245 }
1246
1247 /* wait for completion */
1248 *ret_val = 0;
1249 rc = -1;
1250 for (i = 0; i < count; i++) {
1251 DELAY(5);
1252 val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1253
1254 if (val & MCPR_NVM_COMMAND_DONE) {
1255 val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
1256 /* we read nvram data in cpu order,
1257 * but the caller sees it as an array of bytes;
1258 * converting to big-endian does the work
1259 */
1260 *ret_val = htobe32(val);
1261 rc = 0;
1262 break;
1263 }
1264 }
1265
1266 if (rc == -1) {
1267 BLOGE(sc, "nvram read timeout expired "
1268 "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1269 offset, cmd_flags, val);
1270 }
1271
1272 return (rc);
1273 }
1274
1275 static int
1276 bxe_nvram_read(struct bxe_softc *sc,
1277 uint32_t offset,
1278 uint8_t *ret_buf,
1279 int buf_size)
1280 {
1281 uint32_t cmd_flags;
1282 uint32_t val;
1283 int rc;
1284
1285 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1286 BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1287 offset, buf_size);
1288 return (-1);
1289 }
1290
1291 if ((offset + buf_size) > sc->devinfo.flash_size) {
1292 BLOGE(sc, "Invalid parameter, "
1293 "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1294 offset, buf_size, sc->devinfo.flash_size);
1295 return (-1);
1296 }
1297
1298 /* request access to nvram interface */
1299 rc = bxe_acquire_nvram_lock(sc);
1300 if (rc) {
1301 return (rc);
1302 }
1303
1304 /* enable access to nvram interface */
1305 bxe_enable_nvram_access(sc);
1306
1307 /* read the first word(s) */
1308 cmd_flags = MCPR_NVM_COMMAND_FIRST;
1309 while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
1310 rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1311 memcpy(ret_buf, &val, 4);
1312
1313 /* advance to the next dword */
1314 offset += sizeof(uint32_t);
1315 ret_buf += sizeof(uint32_t);
1316 buf_size -= sizeof(uint32_t);
1317 cmd_flags = 0;
1318 }
1319
1320 if (rc == 0) {
1321 cmd_flags |= MCPR_NVM_COMMAND_LAST;
1322 rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1323 memcpy(ret_buf, &val, 4);
1324 }
1325
1326 /* disable access to nvram interface */
1327 bxe_disable_nvram_access(sc);
1328 bxe_release_nvram_lock(sc);
1329
1330 return (rc);
1331 }
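/*
 * Illustrative use of bxe_nvram_read(): offset and length must be dword
 * aligned and stay within devinfo.flash_size. The offset/size below are
 * examples only:
 *
 *     uint8_t buf[64];
 *     if (bxe_nvram_read(sc, 0, buf, sizeof(buf)) != 0)
 *         BLOGE(sc, "nvram read failed\n");
 */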
1332
1333 static int
1334 bxe_nvram_write_dword(struct bxe_softc *sc,
1335 uint32_t offset,
1336 uint32_t val,
1337 uint32_t cmd_flags)
1338 {
1339 int count, i, rc;
1340
1341 /* build the command word */
1342 cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);
1343
1344 /* need to clear DONE bit separately */
1345 REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1346
1347 /* write the data */
1348 REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);
1349
1350 /* address of the NVRAM to write to */
1351 REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1352 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1353
1354 /* issue the write command */
1355 REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1356
1357 /* adjust timeout for emulation/FPGA */
1358 count = NVRAM_TIMEOUT_COUNT;
1359 if (CHIP_REV_IS_SLOW(sc)) {
1360 count *= 100;
1361 }
1362
1363 /* wait for completion */
1364 rc = -1;
1365 for (i = 0; i < count; i++) {
1366 DELAY(5);
1367 val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1368 if (val & MCPR_NVM_COMMAND_DONE) {
1369 rc = 0;
1370 break;
1371 }
1372 }
1373
1374 if (rc == -1) {
1375 BLOGE(sc, "nvram write timeout expired "
1376 "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1377 offset, cmd_flags, val);
1378 }
1379
1380 return (rc);
1381 }
1382
1383 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
1384
1385 static int
1386 bxe_nvram_write1(struct bxe_softc *sc,
1387 uint32_t offset,
1388 uint8_t *data_buf,
1389 int buf_size)
1390 {
1391 uint32_t cmd_flags;
1392 uint32_t align_offset;
1393 uint32_t val;
1394 int rc;
1395
1396 if ((offset + buf_size) > sc->devinfo.flash_size) {
1397 BLOGE(sc, "Invalid parameter, "
1398 "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1399 offset, buf_size, sc->devinfo.flash_size);
1400 return (-1);
1401 }
1402
1403 /* request access to nvram interface */
1404 rc = bxe_acquire_nvram_lock(sc);
1405 if (rc) {
1406 return (rc);
1407 }
1408
1409 /* enable access to nvram interface */
1410 bxe_enable_nvram_access(sc);
1411
1412 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1413 align_offset = (offset & ~0x03);
1414 rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);
1415
1416 if (rc == 0) {
1417 val &= ~(0xff << BYTE_OFFSET(offset));
1418 val |= (*data_buf << BYTE_OFFSET(offset));
1419
1420 /* nvram data is returned as an array of bytes;
1421 * convert it back to cpu order
1422 */
1423 val = be32toh(val);
1424
1425 rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
1426 }
1427
1428 /* disable access to nvram interface */
1429 bxe_disable_nvram_access(sc);
1430 bxe_release_nvram_lock(sc);
1431
1432 return (rc);
1433 }
1434
1435 static int
1436 bxe_nvram_write(struct bxe_softc *sc,
1437 uint32_t offset,
1438 uint8_t *data_buf,
1439 int buf_size)
1440 {
1441 uint32_t cmd_flags;
1442 uint32_t val;
1443 uint32_t written_so_far;
1444 int rc;
1445
1446 if (buf_size == 1) {
1447 return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
1448 }
1449
1450 if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
1451 BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1452 offset, buf_size);
1453 return (-1);
1454 }
1455
1456 if (buf_size == 0) {
1457 return (0); /* nothing to do */
1458 }
1459
1460 if ((offset + buf_size) > sc->devinfo.flash_size) {
1461 BLOGE(sc, "Invalid parameter, "
1462 "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1463 offset, buf_size, sc->devinfo.flash_size);
1464 return (-1);
1465 }
1466
1467 /* request access to nvram interface */
1468 rc = bxe_acquire_nvram_lock(sc);
1469 if (rc) {
1470 return (rc);
1471 }
1472
1473 /* enable access to nvram interface */
1474 bxe_enable_nvram_access(sc);
1475
1476 written_so_far = 0;
1477 cmd_flags = MCPR_NVM_COMMAND_FIRST;
1478 while ((written_so_far < buf_size) && (rc == 0)) {
1479 if (written_so_far == (buf_size - sizeof(uint32_t))) {
1480 cmd_flags |= MCPR_NVM_COMMAND_LAST;
1481 } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
1482 cmd_flags |= MCPR_NVM_COMMAND_LAST;
1483 } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
1484 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
1485 }
1486
1487 memcpy(&val, data_buf, 4);
1488
1489 rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);
1490
1491 /* advance to the next dword */
1492 offset += sizeof(uint32_t);
1493 data_buf += sizeof(uint32_t);
1494 written_so_far += sizeof(uint32_t);
1495 cmd_flags = 0;
1496 }
1497
1498 /* disable access to nvram interface */
1499 bxe_disable_nvram_access(sc);
1500 bxe_release_nvram_lock(sc);
1501
1502 return (rc);
1503 }
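/*
 * Flag handling note for the loop above: MCPR_NVM_COMMAND_LAST is set on the
 * final dword of the buffer and on the dword that closes an NVRAM page, and
 * MCPR_NVM_COMMAND_FIRST is raised again when a new page begins, so a large
 * write is issued to the flash as a sequence of per-page bursts.
 */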
1504
1505 /* copy command into DMAE command memory and set DMAE command Go */
1506 void
1507 bxe_post_dmae(struct bxe_softc *sc,
1508 struct dmae_cmd *dmae,
1509 int idx)
1510 {
1511 uint32_t cmd_offset;
1512 int i;
1513
1514 cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
1515 for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
1516 REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
1517 }
1518
1519 REG_WR(sc, dmae_reg_go_c[idx], 1);
1520 }
1521
1522 uint32_t
1523 bxe_dmae_opcode_add_comp(uint32_t opcode,
1524 uint8_t comp_type)
1525 {
1526 return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
1527 DMAE_CMD_C_TYPE_ENABLE));
1528 }
1529
1530 uint32_t
1531 bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
1532 {
1533 return (opcode & ~DMAE_CMD_SRC_RESET);
1534 }
1535
1536 uint32_t
1537 bxe_dmae_opcode(struct bxe_softc *sc,
1538 uint8_t src_type,
1539 uint8_t dst_type,
1540 uint8_t with_comp,
1541 uint8_t comp_type)
1542 {
1543 uint32_t opcode = 0;
1544
1545 opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
1546 (dst_type << DMAE_CMD_DST_SHIFT));
1547
1548 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
1549
1550 opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
1551
1552 opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
1553 (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));
1554
1555 opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);
1556
1557 #ifdef __BIG_ENDIAN
1558 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
1559 #else
1560 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
1561 #endif
1562
1563 if (with_comp) {
1564 opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
1565 }
1566
1567 return (opcode);
1568 }
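/*
 * bxe_dmae_opcode() packs the source/destination types, port, VN, error
 * policy and endianness swap mode into a single DMAE command opcode;
 * bxe_dmae_opcode_add_comp() then appends the completion destination when
 * the caller wants to poll a completion word.
 */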
1569
1570 static void
1571 bxe_prep_dmae_with_comp(struct bxe_softc *sc,
1572 struct dmae_cmd *dmae,
1573 uint8_t src_type,
1574 uint8_t dst_type)
1575 {
1576 memset(dmae, 0, sizeof(struct dmae_cmd));
1577
1578 /* set the opcode */
1579 dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
1580 TRUE, DMAE_COMP_PCI);
1581
1582 /* fill in the completion parameters */
1583 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
1584 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
1585 dmae->comp_val = DMAE_COMP_VAL;
1586 }
1587
1588 /* issue a DMAE command over the init channel and wait for completion */
1589 static int
1590 bxe_issue_dmae_with_comp(struct bxe_softc *sc,
1591 struct dmae_cmd *dmae)
1592 {
1593 uint32_t *wb_comp = BXE_SP(sc, wb_comp);
1594 int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;
1595
1596 BXE_DMAE_LOCK(sc);
1597
1598 /* reset completion */
1599 *wb_comp = 0;
1600
1601 /* post the command on the channel used for initializations */
1602 bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
1603
1604 /* wait for completion */
1605 DELAY(5);
1606
1607 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
1608 if (!timeout ||
1609 (sc->recovery_state != BXE_RECOVERY_DONE &&
1610 sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
1611 BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
1612 *wb_comp, sc->recovery_state);
1613 BXE_DMAE_UNLOCK(sc);
1614 return (DMAE_TIMEOUT);
1615 }
1616
1617 timeout--;
1618 DELAY(50);
1619 }
1620
1621 if (*wb_comp & DMAE_PCI_ERR_FLAG) {
1622 BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
1623 *wb_comp, sc->recovery_state);
1624 BXE_DMAE_UNLOCK(sc);
1625 return (DMAE_PCI_ERROR);
1626 }
1627
1628 BXE_DMAE_UNLOCK(sc);
1629 return (0);
1630 }
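/*
 * The completion word is polled in 50 usec steps, so the 4000-iteration
 * timeout above is roughly 200 msec on real silicon (100x that on
 * emulation/FPGA via CHIP_REV_IS_SLOW). DMAE_PCI_ERR_FLAG in the completion
 * value means the DMAE engine reported a PCI error rather than timing out.
 */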
1631
1632 void
1633 bxe_read_dmae(struct bxe_softc *sc,
1634 uint32_t src_addr,
1635 uint32_t len32)
1636 {
1637 struct dmae_cmd dmae;
1638 uint32_t *data;
1639 int i, rc;
1640
1641 DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));
1642
1643 if (!sc->dmae_ready) {
1644 data = BXE_SP(sc, wb_data[0]);
1645
1646 for (i = 0; i < len32; i++) {
1647 data[i] = (CHIP_IS_E1(sc)) ?
1648 bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
1649 REG_RD(sc, (src_addr + (i * 4)));
1650 }
1651
1652 return;
1653 }
1654
1655 /* set opcode and fixed command fields */
1656 bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
1657
1658 /* fill in addresses and len */
1659 dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
1660 dmae.src_addr_hi = 0;
1661 dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
1662 dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
1663 dmae.len = len32;
1664
1665 /* issue the command and wait for completion */
1666 if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1667 bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1668 }
1669 }
1670
1671 void
1672 bxe_write_dmae(struct bxe_softc *sc,
1673 bus_addr_t dma_addr,
1674 uint32_t dst_addr,
1675 uint32_t len32)
1676 {
1677 struct dmae_cmd dmae;
1678 int rc;
1679
1680 if (!sc->dmae_ready) {
1681 DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));
1682
1683 if (CHIP_IS_E1(sc)) {
1684 ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1685 } else {
1686 ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1687 }
1688
1689 return;
1690 }
1691
1692 /* set opcode and fixed command fields */
1693 bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
1694
1695 /* fill in addresses and len */
1696 dmae.src_addr_lo = U64_LO(dma_addr);
1697 dmae.src_addr_hi = U64_HI(dma_addr);
1698 dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
1699 dmae.dst_addr_hi = 0;
1700 dmae.len = len32;
1701
1702 /* issue the command and wait for completion */
1703 if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1704 bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1705 }
1706 }
1707
1708 void
1709 bxe_write_dmae_phys_len(struct bxe_softc *sc,
1710 bus_addr_t phys_addr,
1711 uint32_t addr,
1712 uint32_t len)
1713 {
1714 int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
1715 int offset = 0;
1716
1717 while (len > dmae_wr_max) {
1718 bxe_write_dmae(sc,
1719 (phys_addr + offset), /* src DMA address */
1720 (addr + offset), /* dst GRC address */
1721 dmae_wr_max);
1722 offset += (dmae_wr_max * 4);
1723 len -= dmae_wr_max;
1724 }
1725
1726 bxe_write_dmae(sc,
1727 (phys_addr + offset), /* src DMA address */
1728 (addr + offset), /* dst GRC address */
1729 len);
1730 }
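/*
 * Unit note for the loop above: 'len' and dmae_wr_max count 32-bit dwords
 * (the DMAE length field), while phys_addr/addr are byte addresses, which is
 * why 'offset' advances by (dmae_wr_max * 4) bytes per chunk.
 */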
1731
1732 void
1733 bxe_set_ctx_validation(struct bxe_softc *sc,
1734 struct eth_context *cxt,
1735 uint32_t cid)
1736 {
1737 /* ustorm cxt validation */
1738 cxt->ustorm_ag_context.cdu_usage =
1739 CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1740 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
1741 /* xcontext validation */
1742 cxt->xstorm_ag_context.cdu_reserved =
1743 CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1744 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
1745 }
1746
1747 static void
1748 bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
1749 uint8_t port,
1750 uint8_t fw_sb_id,
1751 uint8_t sb_index,
1752 uint8_t ticks)
1753 {
1754 uint32_t addr =
1755 (BAR_CSTRORM_INTMEM +
1756 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));
1757
1758 REG_WR8(sc, addr, ticks);
1759
1760 BLOGD(sc, DBG_LOAD,
1761 "port %d fw_sb_id %d sb_index %d ticks %d\n",
1762 port, fw_sb_id, sb_index, ticks);
1763 }
1764
1765 static void
1766 bxe_storm_memset_hc_disable(struct bxe_softc *sc,
1767 uint8_t port,
1768 uint16_t fw_sb_id,
1769 uint8_t sb_index,
1770 uint8_t disable)
1771 {
1772 uint32_t enable_flag =
1773 (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
1774 uint32_t addr =
1775 (BAR_CSTRORM_INTMEM +
1776 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
1777 uint8_t flags;
1778
1779 /* clear and set */
1780 flags = REG_RD8(sc, addr);
1781 flags &= ~HC_INDEX_DATA_HC_ENABLED;
1782 flags |= enable_flag;
1783 REG_WR8(sc, addr, flags);
1784
1785 BLOGD(sc, DBG_LOAD,
1786 "port %d fw_sb_id %d sb_index %d disable %d\n",
1787 port, fw_sb_id, sb_index, disable);
1788 }
1789
1790 void
1791 bxe_update_coalesce_sb_index(struct bxe_softc *sc,
1792 uint8_t fw_sb_id,
1793 uint8_t sb_index,
1794 uint8_t disable,
1795 uint16_t usec)
1796 {
1797 int port = SC_PORT(sc);
1798 uint8_t ticks = (usec / 4); /* XXX ??? */
1799
1800 bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);
1801
1802 disable = (disable) ? 1 : ((usec) ? 0 : 1);
1803 bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
1804 }
1805
1806 void
1807 elink_cb_udelay(struct bxe_softc *sc,
1808 uint32_t usecs)
1809 {
1810 DELAY(usecs);
1811 }
1812
1813 uint32_t
1814 elink_cb_reg_read(struct bxe_softc *sc,
1815 uint32_t reg_addr)
1816 {
1817 return (REG_RD(sc, reg_addr));
1818 }
1819
1820 void
1821 elink_cb_reg_write(struct bxe_softc *sc,
1822 uint32_t reg_addr,
1823 uint32_t val)
1824 {
1825 REG_WR(sc, reg_addr, val);
1826 }
1827
1828 void
1829 elink_cb_reg_wb_write(struct bxe_softc *sc,
1830 uint32_t offset,
1831 uint32_t *wb_write,
1832 uint16_t len)
1833 {
1834 REG_WR_DMAE(sc, offset, wb_write, len);
1835 }
1836
1837 void
1838 elink_cb_reg_wb_read(struct bxe_softc *sc,
1839 uint32_t offset,
1840 uint32_t *wb_write,
1841 uint16_t len)
1842 {
1843 REG_RD_DMAE(sc, offset, wb_write, len);
1844 }
1845
1846 uint8_t
1847 elink_cb_path_id(struct bxe_softc *sc)
1848 {
1849 return (SC_PATH(sc));
1850 }
1851
1852 void
1853 elink_cb_event_log(struct bxe_softc *sc,
1854 const elink_log_id_t elink_log_id,
1855 ...)
1856 {
1857 /* XXX */
1858 BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
1859 }
1860
1861 static int
1862 bxe_set_spio(struct bxe_softc *sc,
1863 int spio,
1864 uint32_t mode)
1865 {
1866 uint32_t spio_reg;
1867
1868 /* Only 2 SPIOs are configurable */
1869 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
1870 BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
1871 return (-1);
1872 }
1873
1874 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1875
1876 /* read SPIO and mask except the float bits */
1877 spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
1878
1879 switch (mode) {
1880 case MISC_SPIO_OUTPUT_LOW:
1881 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
1882 /* clear FLOAT and set CLR */
1883 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1884 spio_reg |= (spio << MISC_SPIO_CLR_POS);
1885 break;
1886
1887 case MISC_SPIO_OUTPUT_HIGH:
1888 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
1889 /* clear FLOAT and set SET */
1890 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1891 spio_reg |= (spio << MISC_SPIO_SET_POS);
1892 break;
1893
1894 case MISC_SPIO_INPUT_HI_Z:
1895 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
1896 /* set FLOAT */
1897 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
1898 break;
1899
1900 default:
1901 break;
1902 }
1903
1904 REG_WR(sc, MISC_REG_SPIO, spio_reg);
1905 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1906
1907 return (0);
1908 }
1909
1910 static int
1911 bxe_gpio_read(struct bxe_softc *sc,
1912 int gpio_num,
1913 uint8_t port)
1914 {
1915 /* The GPIO should be swapped if swap register is set and active */
1916 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1917 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1918 int gpio_shift = (gpio_num +
1919 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1920 uint32_t gpio_mask = (1 << gpio_shift);
1921 uint32_t gpio_reg;
1922
1923 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1924 BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
1925 " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
1926 gpio_mask);
1927 return (-1);
1928 }
1929
1930 /* read GPIO value */
1931 gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1932
1933 /* get the requested pin value */
1934 return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
1935 }
1936
1937 static int
1938 bxe_gpio_write(struct bxe_softc *sc,
1939 int gpio_num,
1940 uint32_t mode,
1941 uint8_t port)
1942 {
1943 /* The GPIO should be swapped if swap register is set and active */
1944 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1945 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1946 int gpio_shift = (gpio_num +
1947 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1948 uint32_t gpio_mask = (1 << gpio_shift);
1949 uint32_t gpio_reg;
1950
1951 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1952 BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
1953 " gpio_shift %d gpio_mask 0x%x\n",
1954 gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
1955 return (-1);
1956 }
1957
1958 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1959
1960 /* read GPIO and mask except the float bits */
1961 gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1962
1963 switch (mode) {
1964 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1965 BLOGD(sc, DBG_PHY,
1966 "Set GPIO %d (shift %d) -> output low\n",
1967 gpio_num, gpio_shift);
1968 /* clear FLOAT and set CLR */
1969 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1970 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1971 break;
1972
1973 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1974 BLOGD(sc, DBG_PHY,
1975 "Set GPIO %d (shift %d) -> output high\n",
1976 gpio_num, gpio_shift);
1977 /* clear FLOAT and set SET */
1978 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1979 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1980 break;
1981
1982 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1983 BLOGD(sc, DBG_PHY,
1984 "Set GPIO %d (shift %d) -> input\n",
1985 gpio_num, gpio_shift);
1986 /* set FLOAT */
1987 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1988 break;
1989
1990 default:
1991 break;
1992 }
1993
1994 REG_WR(sc, MISC_REG_GPIO, gpio_reg);
1995 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1996
1997 return (0);
1998 }
1999
2000 static int
2001 bxe_gpio_mult_write(struct bxe_softc *sc,
2002 uint8_t pins,
2003 uint32_t mode)
2004 {
2005 uint32_t gpio_reg;
2006
2007 /* any port swapping should be handled by caller */
2008
2009 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2010
2011 /* read GPIO and mask except the float bits */
2012 gpio_reg = REG_RD(sc, MISC_REG_GPIO);
2013 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2014 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2015 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2016
2017 switch (mode) {
2018 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2019 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
2020 /* set CLR */
2021 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2022 break;
2023
2024 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2025 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
2026 /* set SET */
2027 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2028 break;
2029
2030 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2031 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
2032 /* set FLOAT */
2033 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2034 break;
2035
2036 default:
2037 BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
2038 " gpio_reg 0x%x\n", pins, mode, gpio_reg);
2039 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2040 return (-1);
2041 }
2042
2043 REG_WR(sc, MISC_REG_GPIO, gpio_reg);
2044 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2045
2046 return (0);
2047 }
2048
2049 static int
2050 bxe_gpio_int_write(struct bxe_softc *sc,
2051 int gpio_num,
2052 uint32_t mode,
2053 uint8_t port)
2054 {
2055 /* The GPIO should be swapped if swap register is set and active */
2056 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
2057 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
2058 int gpio_shift = (gpio_num +
2059 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
2060 uint32_t gpio_mask = (1 << gpio_shift);
2061 uint32_t gpio_reg;
2062
2063 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2064 BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
2065 " gpio_shift %d gpio_mask 0x%x\n",
2066 gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
2067 return (-1);
2068 }
2069
2070 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2071
2072 /* read GPIO int */
2073 gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
2074
2075 switch (mode) {
2076 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2077 BLOGD(sc, DBG_PHY,
2078 "Clear GPIO INT %d (shift %d) -> output low\n",
2079 gpio_num, gpio_shift);
2080 /* clear SET and set CLR */
2081 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2082 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2083 break;
2084
2085 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2086 BLOGD(sc, DBG_PHY,
2087 "Set GPIO INT %d (shift %d) -> output high\n",
2088 gpio_num, gpio_shift);
2089 /* clear CLR and set SET */
2090 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2091 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2092 break;
2093
2094 default:
2095 break;
2096 }
2097
2098 REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
2099 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2100
2101 return (0);
2102 }
2103
2104 uint32_t
2105 elink_cb_gpio_read(struct bxe_softc *sc,
2106 uint16_t gpio_num,
2107 uint8_t port)
2108 {
2109 return (bxe_gpio_read(sc, gpio_num, port));
2110 }
2111
2112 uint8_t
2113 elink_cb_gpio_write(struct bxe_softc *sc,
2114 uint16_t gpio_num,
2115 uint8_t mode, /* 0=low 1=high */
2116 uint8_t port)
2117 {
2118 return (bxe_gpio_write(sc, gpio_num, mode, port));
2119 }
2120
2121 uint8_t
2122 elink_cb_gpio_mult_write(struct bxe_softc *sc,
2123 uint8_t pins,
2124 uint8_t mode) /* 0=low 1=high */
2125 {
2126 return (bxe_gpio_mult_write(sc, pins, mode));
2127 }
2128
2129 uint8_t
2130 elink_cb_gpio_int_write(struct bxe_softc *sc,
2131 uint16_t gpio_num,
2132 uint8_t mode, /* 0=low 1=high */
2133 uint8_t port)
2134 {
2135 return (bxe_gpio_int_write(sc, gpio_num, mode, port));
2136 }
2137
2138 void
2139 elink_cb_notify_link_changed(struct bxe_softc *sc)
2140 {
2141 REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
2142 (SC_FUNC(sc) * sizeof(uint32_t))), 1);
2143 }
2144
2145 /* send the MCP a request, block until there is a reply */
2146 uint32_t
2147 elink_cb_fw_command(struct bxe_softc *sc,
2148 uint32_t command,
2149 uint32_t param)
2150 {
2151 int mb_idx = SC_FW_MB_IDX(sc);
2152 uint32_t seq;
2153 uint32_t rc = 0;
2154 uint32_t cnt = 1;
2155 uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;
2156
2157 BXE_FWMB_LOCK(sc);
2158
2159 seq = ++sc->fw_seq;
2160 SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
2161 SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
2162
2163 BLOGD(sc, DBG_PHY,
2164 "wrote command 0x%08x to FW MB param 0x%08x\n",
2165 (command | seq), param);
2166
2167 /* Let the FW do its magic. Give it up to 5 seconds... */
2168 do {
2169 DELAY(delay * 1000);
2170 rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
2171 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2172
2173 BLOGD(sc, DBG_PHY,
2174 "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
2175 cnt*delay, rc, seq);
2176
2177 /* is this a reply to our command? */
2178 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
2179 rc &= FW_MSG_CODE_MASK;
2180 } else {
2181 /* Ruh-roh! */
2182 BLOGE(sc, "FW failed to respond!\n");
2183 // XXX bxe_fw_dump(sc);
2184 rc = 0;
2185 }
2186
2187 BXE_FWMB_UNLOCK(sc);
2188 return (rc);
2189 }
2190
2191 static uint32_t
2192 bxe_fw_command(struct bxe_softc *sc,
2193 uint32_t command,
2194 uint32_t param)
2195 {
2196 return (elink_cb_fw_command(sc, command, param));
2197 }
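/*
 * Illustrative sketch only (not part of the driver): how a caller can use the
 * mailbox helpers above. The command/param arguments are placeholders rather
 * than specific MCP opcodes; the key point is that a zero return means the
 * firmware never acknowledged the sequence number within the ~5 second window.
 */
static inline int
bxe_fw_command_sketch(struct bxe_softc *sc,
                      uint32_t mcp_cmd,
                      uint32_t mcp_param)
{
    uint32_t fw_resp;

    fw_resp = bxe_fw_command(sc, mcp_cmd, mcp_param);
    if (fw_resp == 0) {
        /* no reply; elink_cb_fw_command() already logged the error */
        return (ETIMEDOUT);
    }

    /* fw_resp carries only the FW_MSG_CODE_MASK bits of the reply */
    return (0);
}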
2198
2199 static void
2200 __storm_memset_dma_mapping(struct bxe_softc *sc,
2201 uint32_t addr,
2202 bus_addr_t mapping)
2203 {
2204 REG_WR(sc, addr, U64_LO(mapping));
2205 REG_WR(sc, (addr + 4), U64_HI(mapping));
2206 }
2207
2208 static void
2209 storm_memset_spq_addr(struct bxe_softc *sc,
2210 bus_addr_t mapping,
2211 uint16_t abs_fid)
2212 {
2213 uint32_t addr = (XSEM_REG_FAST_MEMORY +
2214 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
2215 __storm_memset_dma_mapping(sc, addr, mapping);
2216 }
2217
2218 static void
2219 storm_memset_vf_to_pf(struct bxe_softc *sc,
2220 uint16_t abs_fid,
2221 uint16_t pf_id)
2222 {
2223 REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2224 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2225 REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2226 REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2227 }
2228
2229 static void
2230 storm_memset_func_en(struct bxe_softc *sc,
2231 uint16_t abs_fid,
2232 uint8_t enable)
2233 {
2234 REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2235 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2236 REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2237 REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2238 }
2239
2240 static void
2241 storm_memset_eq_data(struct bxe_softc *sc,
2242 struct event_ring_data *eq_data,
2243 uint16_t pfid)
2244 {
2245 uint32_t addr;
2246 size_t size;
2247
2248 addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
2249 size = sizeof(struct event_ring_data);
2250 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
2251 }
2252
2253 static void
2254 storm_memset_eq_prod(struct bxe_softc *sc,
2255 uint16_t eq_prod,
2256 uint16_t pfid)
2257 {
2258 uint32_t addr = (BAR_CSTRORM_INTMEM +
2259 CSTORM_EVENT_RING_PROD_OFFSET(pfid));
2260 REG_WR16(sc, addr, eq_prod);
2261 }
2262
2263 /*
2264 * Post a slowpath command.
2265 *
2266 * A slowpath command is used to propagate a configuration change through
2267 * the controller in a controlled manner, allowing each STORM processor and
2268 * other H/W blocks to phase in the change. The commands sent on the
2269 * slowpath are referred to as ramrods. Depending on the ramrod used the
2270 * completion of the ramrod will occur in different ways. Here's a
2271 * breakdown of ramrods and how they complete:
2272 *
2273 * RAMROD_CMD_ID_ETH_PORT_SETUP
2274 * Used to setup the leading connection on a port. Completes on the
2275 * Receive Completion Queue (RCQ) of that port (typically fp[0]).
2276 *
2277 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2278 * Used to setup an additional connection on a port. Completes on the
2279 * RCQ of the multi-queue/RSS connection being initialized.
2280 *
2281 * RAMROD_CMD_ID_ETH_STAT_QUERY
2282 * Used to force the storm processors to update the statistics database
2283 * in host memory. This ramrod is sent on the leading connection CID and
2284 * completes as an index increment of the CSTORM on the default status
2285 * block.
2286 *
2287 * RAMROD_CMD_ID_ETH_UPDATE
2288 * Used to update the state of the leading connection, usually to update
2289 * the RSS indirection table. Completes on the RCQ of the leading
2290 * connection. (Not currently used under FreeBSD until OS support becomes
2291 * available.)
2292 *
2293 * RAMROD_CMD_ID_ETH_HALT
2294 * Used when tearing down a connection prior to driver unload. Completes
2295 * on the RCQ of the multi-queue/RSS connection being torn down. Don't
2296 * use this on the leading connection.
2297 *
2298 * RAMROD_CMD_ID_ETH_SET_MAC
2299 * Sets the Unicast/Broadcast/Multicast used by the port. Completes on
2300 * the RCQ of the leading connection.
2301 *
2302 * RAMROD_CMD_ID_ETH_CFC_DEL
2303 * Used when tearing down a connection prior to driver unload. Completes
2304 * on the RCQ of the leading connection (since the current connection
2305 * has been completely removed from controller memory).
2306 *
2307 * RAMROD_CMD_ID_ETH_PORT_DEL
2308 * Used to tear down the leading connection prior to driver unload,
2309 * typically fp[0]. Completes as an index increment of the CSTORM on the
2310 * default status block.
2311 *
2312 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2313 * Used for connection offload. Completes on the RCQ of the multi-queue
2314 * RSS connection that is being offloaded. (Not currently used under
2315 * FreeBSD.)
2316 *
2317 * There can only be one command pending per function.
2318 *
2319 * Returns:
2320 * 0 = Success, !0 = Failure.
2321 */
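/*
 * Illustrative sketch only (not part of the driver; assumes the bxe_sp_post()
 * prototype from bxe.h is visible here): posting a ramrod whose private data
 * lives in the slowpath DMA region. The command, CID, and rdata block below
 * are placeholders; a real caller passes the values required by the specific
 * ramrod, and the DMA address of the private data is split into the
 * data_hi/data_lo pair with U64_HI()/U64_LO() as described for bxe_sp_post()
 * below.
 */
static inline int
bxe_sp_post_sketch(struct bxe_softc *sc, int cid)
{
    /* example private data block; classification rules complete on the EQ */
    bus_addr_t rdata_mapping = BXE_SP_MAPPING(sc, func_rdata);

    return (bxe_sp_post(sc,
                        RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES,
                        cid,
                        U64_HI(rdata_mapping),
                        U64_LO(rdata_mapping),
                        ETH_CONNECTION_TYPE));
}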
2322
2323 /* must be called under the spq lock */
2324 static inline
2325 struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2326 {
2327 struct eth_spe *next_spe = sc->spq_prod_bd;
2328
2329 if (sc->spq_prod_bd == sc->spq_last_bd) {
2330 /* wrap back to the first eth_spq */
2331 sc->spq_prod_bd = sc->spq;
2332 sc->spq_prod_idx = 0;
2333 } else {
2334 sc->spq_prod_bd++;
2335 sc->spq_prod_idx++;
2336 }
2337
2338 return (next_spe);
2339 }
2340
2341 /* must be called under the spq lock */
2342 static inline
2343 void bxe_sp_prod_update(struct bxe_softc *sc)
2344 {
2345 int func = SC_FUNC(sc);
2346
2347 /*
2348 * Make sure that BD data is updated before writing the producer.
2349 * BD data is written to the memory, the producer is read from the
2350 * memory, thus we need a full memory barrier to ensure the ordering.
2351 */
2352 mb();
2353
2354 REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2355 sc->spq_prod_idx);
2356
2357 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2358 BUS_SPACE_BARRIER_WRITE);
2359 }
2360
2361 /**
2362 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2363 *
2364 * @cmd: command to check
2365 * @cmd_type: command type
2366 */
2367 static inline
2368 int bxe_is_contextless_ramrod(int cmd,
2369 int cmd_type)
2370 {
2371 if ((cmd_type == NONE_CONNECTION_TYPE) ||
2372 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2373 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2374 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2375 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2376 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2377 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2378 return (TRUE);
2379 } else {
2380 return (FALSE);
2381 }
2382 }
2383
2384 /**
2385 * bxe_sp_post - place a single command on an SP ring
2386 *
2387 * @sc: driver handle
2388 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
2389 * @cid: SW CID the command is related to
2390 * @data_hi: command private data address (high 32 bits)
2391 * @data_lo: command private data address (low 32 bits)
2392 * @cmd_type: command type (e.g. NONE, ETH)
2393 *
2394 * SP data is handled as if it's always an address pair, thus data fields are
2395 * not swapped to little endian in upper functions. Instead this function swaps
2396 * data as if it's two uint32 fields.
2397 */
2398 int
2399 bxe_sp_post(struct bxe_softc *sc,
2400 int command,
2401 int cid,
2402 uint32_t data_hi,
2403 uint32_t data_lo,
2404 int cmd_type)
2405 {
2406 struct eth_spe *spe;
2407 uint16_t type;
2408 int common;
2409
2410 common = bxe_is_contextless_ramrod(command, cmd_type);
2411
2412 BXE_SP_LOCK(sc);
2413
2414 if (common) {
2415 if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2416 BLOGE(sc, "EQ ring is full!\n");
2417 BXE_SP_UNLOCK(sc);
2418 return (-1);
2419 }
2420 } else {
2421 if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2422 BLOGE(sc, "SPQ ring is full!\n");
2423 BXE_SP_UNLOCK(sc);
2424 return (-1);
2425 }
2426 }
2427
2428 spe = bxe_sp_get_next(sc);
2429
2430 /* CID needs the port number to be encoded in it */
2431 spe->hdr.conn_and_cmd_data =
2432 htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));
2433
2434 type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;
2435
2436 /* TBD: Check if it works for VFs */
2437 type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
2438 SPE_HDR_T_FUNCTION_ID);
2439
2440 spe->hdr.type = htole16(type);
2441
2442 spe->data.update_data_addr.hi = htole32(data_hi);
2443 spe->data.update_data_addr.lo = htole32(data_lo);
2444
2445 /*
2446 * It's ok if the actual decrement is issued towards the memory
2447 * somewhere between the lock and unlock. Thus no more explicit
2448 * memory barrier is needed.
2449 */
2450 if (common) {
2451 atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2452 } else {
2453 atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2454 }
2455
2456 BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2457 BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2458 BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2459 BLOGD(sc, DBG_SP,
2460 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2461 sc->spq_prod_idx,
2462 (uint32_t)U64_HI(sc->spq_dma.paddr),
2463 (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2464 command,
2465 common,
2466 HW_CID(sc, cid),
2467 data_hi,
2468 data_lo,
2469 type,
2470 atomic_load_acq_long(&sc->cq_spq_left),
2471 atomic_load_acq_long(&sc->eq_spq_left));
2472
2473 bxe_sp_prod_update(sc);
2474
2475 BXE_SP_UNLOCK(sc);
2476 return (0);
2477 }
2478
2479 /**
2480 * bxe_debug_print_ind_table - prints the indirection table configuration.
2481 *
2482 * @sc: driver handle
2483 * @p: pointer to rss configuration
2484 */
2485
2486 /*
2487 * FreeBSD Device probe function.
2488 *
2489 * Compares the device found to the driver's list of supported devices and
2490 * reports back to the bsd loader whether this is the right driver for the device.
2491 * This is the driver entry function called from the "kldload" command.
2492 *
2493 * Returns:
2494 * BUS_PROBE_DEFAULT on success, positive value on failure.
2495 */
2496 static int
2497 bxe_probe(device_t dev)
2498 {
2499 struct bxe_device_type *t;
2500 char *descbuf;
2501 uint16_t did, sdid, svid, vid;
2502
2503 /* Find our device structure */
2504 t = bxe_devs;
2505
2506 /* Get the data for the device to be probed. */
2507 vid = pci_get_vendor(dev);
2508 did = pci_get_device(dev);
2509 svid = pci_get_subvendor(dev);
2510 sdid = pci_get_subdevice(dev);
2511
2512 /* Look through the list of known devices for a match. */
2513 while (t->bxe_name != NULL) {
2514 if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2515 ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2516 ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2517 descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
2518 if (descbuf == NULL)
2519 return (ENOMEM);
2520
2521 /* Print out the device identity. */
2522 snprintf(descbuf, BXE_DEVDESC_MAX,
2523 "%s (%c%d) BXE v:%s", t->bxe_name,
2524 (((pci_read_config(dev, PCIR_REVID, 4) &
2525 0xf0) >> 4) + 'A'),
2526 (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2527 BXE_DRIVER_VERSION);
2528
2529 device_set_desc_copy(dev, descbuf);
2530 free(descbuf, M_TEMP);
2531 return (BUS_PROBE_DEFAULT);
2532 }
2533 t++;
2534 }
2535
2536 return (ENXIO);
2537 }
2538
2539 static void
2540 bxe_init_mutexes(struct bxe_softc *sc)
2541 {
2542 #ifdef BXE_CORE_LOCK_SX
2543 snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2544 "bxe%d_core_lock", sc->unit);
2545 sx_init(&sc->core_sx, sc->core_sx_name);
2546 #else
2547 snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2548 "bxe%d_core_lock", sc->unit);
2549 mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2550 #endif
2551
2552 snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2553 "bxe%d_sp_lock", sc->unit);
2554 mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2555
2556 snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2557 "bxe%d_dmae_lock", sc->unit);
2558 mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2559
2560 snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2561 "bxe%d_phy_lock", sc->unit);
2562 mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2563
2564 snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2565 "bxe%d_fwmb_lock", sc->unit);
2566 mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2567
2568 snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2569 "bxe%d_print_lock", sc->unit);
2570 mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2571
2572 snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2573 "bxe%d_stats_lock", sc->unit);
2574 mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2575
2576 snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2577 "bxe%d_mcast_lock", sc->unit);
2578 mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2579 }
2580
2581 static void
2582 bxe_release_mutexes(struct bxe_softc *sc)
2583 {
2584 #ifdef BXE_CORE_LOCK_SX
2585 sx_destroy(&sc->core_sx);
2586 #else
2587 if (mtx_initialized(&sc->core_mtx)) {
2588 mtx_destroy(&sc->core_mtx);
2589 }
2590 #endif
2591
2592 if (mtx_initialized(&sc->sp_mtx)) {
2593 mtx_destroy(&sc->sp_mtx);
2594 }
2595
2596 if (mtx_initialized(&sc->dmae_mtx)) {
2597 mtx_destroy(&sc->dmae_mtx);
2598 }
2599
2600 if (mtx_initialized(&sc->port.phy_mtx)) {
2601 mtx_destroy(&sc->port.phy_mtx);
2602 }
2603
2604 if (mtx_initialized(&sc->fwmb_mtx)) {
2605 mtx_destroy(&sc->fwmb_mtx);
2606 }
2607
2608 if (mtx_initialized(&sc->print_mtx)) {
2609 mtx_destroy(&sc->print_mtx);
2610 }
2611
2612 if (mtx_initialized(&sc->stats_mtx)) {
2613 mtx_destroy(&sc->stats_mtx);
2614 }
2615
2616 if (mtx_initialized(&sc->mcast_mtx)) {
2617 mtx_destroy(&sc->mcast_mtx);
2618 }
2619 }
2620
2621 static void
2622 bxe_tx_disable(struct bxe_softc *sc)
2623 {
2624 if_t ifp = sc->ifp;
2625
2626 /* tell the stack the driver is stopped and TX queue is full */
2627 if (ifp != NULL) {
2628 if_setdrvflags(ifp, 0);
2629 }
2630 }
2631
2632 static void
2633 bxe_drv_pulse(struct bxe_softc *sc)
2634 {
2635 SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2636 sc->fw_drv_pulse_wr_seq);
2637 }
2638
2639 static inline uint16_t
2640 bxe_tx_avail(struct bxe_softc *sc,
2641 struct bxe_fastpath *fp)
2642 {
2643 int16_t used;
2644 uint16_t prod;
2645 uint16_t cons;
2646
2647 prod = fp->tx_bd_prod;
2648 cons = fp->tx_bd_cons;
2649
2650 used = SUB_S16(prod, cons);
2651
2652 return (int16_t)(sc->tx_ring_size) - used;
2653 }
2654
2655 static inline int
2656 bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2657 {
2658 uint16_t hw_cons;
2659
2660 mb(); /* status block fields can change */
2661 hw_cons = le16toh(*fp->tx_cons_sb);
2662 return (hw_cons != fp->tx_pkt_cons);
2663 }
2664
2665 static inline uint8_t
2666 bxe_has_tx_work(struct bxe_fastpath *fp)
2667 {
2668 /* expand this for multi-cos if ever supported */
2669 return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2670 }
2671
2672 static inline int
2673 bxe_has_rx_work(struct bxe_fastpath *fp)
2674 {
2675 uint16_t rx_cq_cons_sb;
2676
2677 mb(); /* status block fields can change */
2678 rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2679 if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2680 rx_cq_cons_sb++;
2681 return (fp->rx_cq_cons != rx_cq_cons_sb);
2682 }
2683
2684 static void
2685 bxe_sp_event(struct bxe_softc *sc,
2686 struct bxe_fastpath *fp,
2687 union eth_rx_cqe *rr_cqe)
2688 {
2689 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2690 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2691 enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2692 struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2693
2694 BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2695 fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2696
2697 switch (command) {
2698 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2699 BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2700 drv_cmd = ECORE_Q_CMD_UPDATE;
2701 break;
2702
2703 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2704 BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2705 drv_cmd = ECORE_Q_CMD_SETUP;
2706 break;
2707
2708 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2709 BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2710 drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2711 break;
2712
2713 case (RAMROD_CMD_ID_ETH_HALT):
2714 BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2715 drv_cmd = ECORE_Q_CMD_HALT;
2716 break;
2717
2718 case (RAMROD_CMD_ID_ETH_TERMINATE):
2719 BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid);
2720 drv_cmd = ECORE_Q_CMD_TERMINATE;
2721 break;
2722
2723 case (RAMROD_CMD_ID_ETH_EMPTY):
2724 BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2725 drv_cmd = ECORE_Q_CMD_EMPTY;
2726 break;
2727
2728 default:
2729 BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2730 command, fp->index);
2731 return;
2732 }
2733
2734 if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2735 q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2736 /*
2737 * q_obj->complete_cmd() failure means that this was
2738 * an unexpected completion.
2739 *
2740 * In this case we don't want to increase the sc->spq_left
2741 * because apparently we haven't sent this command the first
2742 * place.
2743 */
2744 // bxe_panic(sc, ("Unexpected SP completion\n"));
2745 return;
2746 }
2747
2748 atomic_add_acq_long(&sc->cq_spq_left, 1);
2749
2750 BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2751 atomic_load_acq_long(&sc->cq_spq_left));
2752 }
2753
2754 /*
2755 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2756 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2757 * the current aggregation queue as in-progress.
2758 */
2759 static void
2760 bxe_tpa_start(struct bxe_softc *sc,
2761 struct bxe_fastpath *fp,
2762 uint16_t queue,
2763 uint16_t cons,
2764 uint16_t prod,
2765 struct eth_fast_path_rx_cqe *cqe)
2766 {
2767 struct bxe_sw_rx_bd tmp_bd;
2768 struct bxe_sw_rx_bd *rx_buf;
2769 struct eth_rx_bd *rx_bd;
2770 int max_agg_queues;
2771 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2772 uint16_t index;
2773
2774 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2775 "cons=%d prod=%d\n",
2776 fp->index, queue, cons, prod);
2777
2778 max_agg_queues = MAX_AGG_QS(sc);
2779
2780 KASSERT((queue < max_agg_queues),
2781 ("fp[%02d] invalid aggr queue (%d >= %d)!",
2782 fp->index, queue, max_agg_queues));
2783
2784 KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2785 ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2786 fp->index, queue));
2787
2788 /* copy the existing mbuf and mapping from the TPA pool */
2789 tmp_bd = tpa_info->bd;
2790
2791 if (tmp_bd.m == NULL) {
2792 uint32_t *tmp;
2793
2794 tmp = (uint32_t *)cqe;
2795
2796 BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d] mbuf not allocated!\n",
2797 fp->index, queue, cons, prod);
2798 BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2799 *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2800
2801 /* XXX Error handling? */
2802 return;
2803 }
2804
2805 /* change the TPA queue to the start state */
2806 tpa_info->state = BXE_TPA_STATE_START;
2807 tpa_info->placement_offset = cqe->placement_offset;
2808 tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags);
2809 tpa_info->vlan_tag = le16toh(cqe->vlan_tag);
2810 tpa_info->len_on_bd = le16toh(cqe->len_on_bd);
2811
2812 fp->rx_tpa_queue_used |= (1 << queue);
2813
2814 /*
2815 * If all the buffer descriptors are filled with mbufs then fill in
2816 * the current consumer index with a new BD. Else if a maximum Rx
2817 * buffer limit is imposed then fill in the next producer index.
2818 */
2819 index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2820 prod : cons;
2821
2822 /* move the received mbuf and mapping to TPA pool */
2823 tpa_info->bd = fp->rx_mbuf_chain[cons];
2824
2825 /* release any existing RX BD mbuf mappings */
2826 if (cons != index) {
2827 rx_buf = &fp->rx_mbuf_chain[cons];
2828
2829 if (rx_buf->m_map != NULL) {
2830 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2831 BUS_DMASYNC_POSTREAD);
2832 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2833 }
2834
2835 /*
2836 * We get here when the maximum number of rx buffers is less than
2837 * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
2838 * it out here without concern of a memory leak.
2839 */
2840 fp->rx_mbuf_chain[cons].m = NULL;
2841 }
2842
2843 /* update the Rx SW BD with the mbuf info from the TPA pool */
2844 fp->rx_mbuf_chain[index] = tmp_bd;
2845
2846 /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2847 rx_bd = &fp->rx_chain[index];
2848 rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2849 rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
2850 }
2851
2852 /*
2853 * When a TPA aggregation is completed, loop through the individual mbufs
2854 * of the aggregation, combining them into a single mbuf which will be sent
2855 * up the stack. Refill all freed SGEs with mbufs as we go along.
2856 */
2857 static int
2858 bxe_fill_frag_mbuf(struct bxe_softc *sc,
2859 struct bxe_fastpath *fp,
2860 struct bxe_sw_tpa_info *tpa_info,
2861 uint16_t queue,
2862 uint16_t pages,
2863 struct mbuf *m,
2864 struct eth_end_agg_rx_cqe *cqe,
2865 uint16_t cqe_idx)
2866 {
2867 struct mbuf *m_frag;
2868 uint32_t frag_len, frag_size, i;
2869 uint16_t sge_idx;
2870 int rc = 0;
2871 int j;
2872
2873 frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
2874
2875 BLOGD(sc, DBG_LRO,
2876 "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
2877 fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
2878
2879 /* make sure the aggregated frame is not too big to handle */
2880 if (pages > 8 * PAGES_PER_SGE) {
2881
2882 uint32_t *tmp = (uint32_t *)cqe;
2883
2884 BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
2885 "pkt_len=%d len_on_bd=%d frag_size=%d\n",
2886 fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
2887 tpa_info->len_on_bd, frag_size);
2888
2889 BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2890 *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2891
2892 bxe_panic(sc, ("sge page count error\n"));
2893 return (EINVAL);
2894 }
2895
2896 /*
2897 * Scan through the scatter gather list pulling individual mbufs into a
2898 * single mbuf for the host stack.
2899 */
2900 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
2901 sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
2902
2903 /*
2904 * Firmware gives the indices of the SGE as if the ring is an array
2905 * (meaning that the "next" element will consume 2 indices).
2906 */
2907 frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
2908
2909 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
2910 "sge_idx=%d frag_size=%d frag_len=%d\n",
2911 fp->index, queue, i, j, sge_idx, frag_size, frag_len);
2912
2913 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
2914
2915 /* allocate a new mbuf for the SGE */
2916 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
2917 if (rc) {
2918 /* Leave all remaining SGEs in the ring! */
2919 return (rc);
2920 }
2921
2922 /* update the fragment length */
2923 m_frag->m_len = frag_len;
2924
2925 /* concatenate the fragment to the head mbuf */
2926 m_cat(m, m_frag);
2927 fp->eth_q_stats.mbuf_alloc_sge--;
2928
2929 /* update the TPA mbuf size and remaining fragment size */
2930 m->m_pkthdr.len += frag_len;
2931 frag_size -= frag_len;
2932 }
2933
2934 BLOGD(sc, DBG_LRO,
2935 "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
2936 fp->index, queue, frag_size);
2937
2938 return (rc);
2939 }
2940
2941 static inline void
2942 bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
2943 {
2944 int i, j;
2945
2946 for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
2947 int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
2948
2949 for (j = 0; j < 2; j++) {
2950 BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
2951 idx--;
2952 }
2953 }
2954 }
2955
2956 static inline void
2957 bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
2958 {
2959 /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
2960 memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
2961
2962 /*
2963 * Clear the two last indices in the page to 1. These are the indices that
2964 * correspond to the "next" element, hence will never be indicated and
2965 * should be removed from the calculations.
2966 */
2967 bxe_clear_sge_mask_next_elems(fp);
2968 }
2969
2970 static inline void
2971 bxe_update_last_max_sge(struct bxe_fastpath *fp,
2972 uint16_t idx)
2973 {
2974 uint16_t last_max = fp->last_max_sge;
2975
2976 if (SUB_S16(idx, last_max) > 0) {
2977 fp->last_max_sge = idx;
2978 }
2979 }
2980
2981 static inline void
2982 bxe_update_sge_prod(struct bxe_softc *sc,
2983 struct bxe_fastpath *fp,
2984 uint16_t sge_len,
2985 union eth_sgl_or_raw_data *cqe)
2986 {
2987 uint16_t last_max, last_elem, first_elem;
2988 uint16_t delta = 0;
2989 uint16_t i;
2990
2991 if (!sge_len) {
2992 return;
2993 }
2994
2995 /* first mark all used pages */
2996 for (i = 0; i < sge_len; i++) {
2997 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
2998 RX_SGE(le16toh(cqe->sgl[i])));
2999 }
3000
3001 BLOGD(sc, DBG_LRO,
3002 "fp[%02d] fp_cqe->sgl[%d] = %d\n",
3003 fp->index, sge_len - 1,
3004 le16toh(cqe->sgl[sge_len - 1]));
3005
3006 /* assume that the last SGE index is the biggest */
3007 bxe_update_last_max_sge(fp,
3008 le16toh(cqe->sgl[sge_len - 1]));
3009
3010 last_max = RX_SGE(fp->last_max_sge);
3011 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
3012 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
3013
3014 /* if ring is not full */
3015 if (last_elem + 1 != first_elem) {
3016 last_elem++;
3017 }
3018
3019 /* now update the prod */
3020 for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
3021 if (__predict_true(fp->sge_mask[i])) {
3022 break;
3023 }
3024
3025 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
3026 delta += BIT_VEC64_ELEM_SZ;
3027 }
3028
3029 if (delta > 0) {
3030 fp->rx_sge_prod += delta;
3031 /* clear page-end entries */
3032 bxe_clear_sge_mask_next_elems(fp);
3033 }
3034
3035 BLOGD(sc, DBG_LRO,
3036 "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3037 fp->index, fp->last_max_sge, fp->rx_sge_prod);
3038 }
3039
3040 /*
3041 * The aggregation on the current TPA queue has completed. Pull the individual
3042 * mbuf fragments together into a single mbuf, perform all necessary checksum
3043 * calculations, and send the resulting mbuf to the stack.
3044 */
3045 static void
3046 bxe_tpa_stop(struct bxe_softc *sc,
3047 struct bxe_fastpath *fp,
3048 struct bxe_sw_tpa_info *tpa_info,
3049 uint16_t queue,
3050 uint16_t pages,
3051 struct eth_end_agg_rx_cqe *cqe,
3052 uint16_t cqe_idx)
3053 {
3054 if_t ifp = sc->ifp;
3055 struct mbuf *m;
3056 int rc = 0;
3057
3058 BLOGD(sc, DBG_LRO,
3059 "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3060 fp->index, queue, tpa_info->placement_offset,
3061 le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3062
3063 m = tpa_info->bd.m;
3064
3065 /* allocate a replacement before modifying existing mbuf */
3066 rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3067 if (rc) {
3068 /* drop the frame and log an error */
3069 fp->eth_q_stats.rx_soft_errors++;
3070 goto bxe_tpa_stop_exit;
3071 }
3072
3073 /* we have a replacement, fixup the current mbuf */
3074 m_adj(m, tpa_info->placement_offset);
3075 m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3076
3077 /* mark the checksums valid (taken care of by the firmware) */
3078 fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3079 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3080 m->m_pkthdr.csum_data = 0xffff;
3081 m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3082 CSUM_IP_VALID |
3083 CSUM_DATA_VALID |
3084 CSUM_PSEUDO_HDR);
3085
3086 /* aggregate all of the SGEs into a single mbuf */
3087 rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3088 if (rc) {
3089 /* drop the packet and log an error */
3090 fp->eth_q_stats.rx_soft_errors++;
3091 m_freem(m);
3092 } else {
3093 if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3094 m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3095 m->m_flags |= M_VLANTAG;
3096 }
3097
3098 /* assign packet to this interface */
3099 if_setrcvif(m, ifp);
3100
3101 /* specify what RSS queue was used for this flow */
3102 m->m_pkthdr.flowid = fp->index;
3103 BXE_SET_FLOWID(m);
3104
3105 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3106 fp->eth_q_stats.rx_tpa_pkts++;
3107
3108 /* pass the frame to the stack */
3109 if_input(ifp, m);
3110 }
3111
3112 /* we passed an mbuf up the stack or dropped the frame */
3113 fp->eth_q_stats.mbuf_alloc_tpa--;
3114
3115 bxe_tpa_stop_exit:
3116
3117 fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3118 fp->rx_tpa_queue_used &= ~(1 << queue);
3119 }
3120
3121 static uint8_t
3122 bxe_service_rxsgl(
3123 struct bxe_fastpath *fp,
3124 uint16_t len,
3125 uint16_t lenonbd,
3126 struct mbuf *m,
3127 struct eth_fast_path_rx_cqe *cqe_fp)
3128 {
3129 struct mbuf *m_frag;
3130 uint16_t frags, frag_len;
3131 uint16_t sge_idx = 0;
3132 uint16_t j;
3133 uint8_t i, rc = 0;
3134 uint32_t frag_size;
3135
3136 /* adjust the mbuf */
3137 m->m_len = lenonbd;
3138
3139 frag_size = len - lenonbd;
3140 frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3141
3142 for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
3143 sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3144
3145 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3146 frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3147 m_frag->m_len = frag_len;
3148
3149 /* allocate a new mbuf for the SGE */
3150 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3151 if (rc) {
3152 /* Leave all remaining SGEs in the ring! */
3153 return (rc);
3154 }
3155 fp->eth_q_stats.mbuf_alloc_sge--;
3156
3157 /* concatenate the fragment to the head mbuf */
3158 m_cat(m, m_frag);
3159
3160 frag_size -= frag_len;
3161 }
3162
3163 bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3164
3165 return rc;
3166 }
3167
3168 static uint8_t
3169 bxe_rxeof(struct bxe_softc *sc,
3170 struct bxe_fastpath *fp)
3171 {
3172 if_t ifp = sc->ifp;
3173 uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3174 uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3175 int rx_pkts = 0;
3176 int rc = 0;
3177
3178 BXE_FP_RX_LOCK(fp);
3179
3180 /* CQ "next element" is of the size of the regular element */
3181 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3182 if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3183 hw_cq_cons++;
3184 }
3185
3186 bd_cons = fp->rx_bd_cons;
3187 bd_prod = fp->rx_bd_prod;
3188 bd_prod_fw = bd_prod;
3189 sw_cq_cons = fp->rx_cq_cons;
3190 sw_cq_prod = fp->rx_cq_prod;
3191
3192 /*
3193 * Memory barrier necessary as speculative reads of the rx
3194 * buffer can be ahead of the index in the status block
3195 */
3196 rmb();
3197
3198 BLOGD(sc, DBG_RX,
3199 "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3200 fp->index, hw_cq_cons, sw_cq_cons);
3201
3202 while (sw_cq_cons != hw_cq_cons) {
3203 struct bxe_sw_rx_bd *rx_buf = NULL;
3204 union eth_rx_cqe *cqe;
3205 struct eth_fast_path_rx_cqe *cqe_fp;
3206 uint8_t cqe_fp_flags;
3207 enum eth_rx_cqe_type cqe_fp_type;
3208 uint16_t len, lenonbd, pad;
3209 struct mbuf *m = NULL;
3210
3211 comp_ring_cons = RCQ(sw_cq_cons);
3212 bd_prod = RX_BD(bd_prod);
3213 bd_cons = RX_BD(bd_cons);
3214
3215 cqe = &fp->rcq_chain[comp_ring_cons];
3216 cqe_fp = &cqe->fast_path_cqe;
3217 cqe_fp_flags = cqe_fp->type_error_flags;
3218 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3219
3220 BLOGD(sc, DBG_RX,
3221 "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3222 "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3223 "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3224 fp->index,
3225 hw_cq_cons,
3226 sw_cq_cons,
3227 bd_prod,
3228 bd_cons,
3229 CQE_TYPE(cqe_fp_flags),
3230 cqe_fp_flags,
3231 cqe_fp->status_flags,
3232 le32toh(cqe_fp->rss_hash_result),
3233 le16toh(cqe_fp->vlan_tag),
3234 le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3235 le16toh(cqe_fp->len_on_bd));
3236
3237 /* is this a slowpath msg? */
3238 if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3239 bxe_sp_event(sc, fp, cqe);
3240 goto next_cqe;
3241 }
3242
3243 rx_buf = &fp->rx_mbuf_chain[bd_cons];
3244
3245 if (!CQE_TYPE_FAST(cqe_fp_type)) {
3246 struct bxe_sw_tpa_info *tpa_info;
3247 uint16_t frag_size, pages;
3248 uint8_t queue;
3249
3250 if (CQE_TYPE_START(cqe_fp_type)) {
3251 bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3252 bd_cons, bd_prod, cqe_fp);
3253 m = NULL; /* packet not ready yet */
3254 goto next_rx;
3255 }
3256
3257 KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3258 ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3259
3260 queue = cqe->end_agg_cqe.queue_index;
3261 tpa_info = &fp->rx_tpa_info[queue];
3262
3263 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3264 fp->index, queue);
3265
3266 frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3267 tpa_info->len_on_bd);
3268 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3269
3270 bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3271 &cqe->end_agg_cqe, comp_ring_cons);
3272
3273 bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3274
3275 goto next_cqe;
3276 }
3277
3278 /* non TPA */
3279
3280 /* is this an error packet? */
3281 if (__predict_false(cqe_fp_flags &
3282 ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3283 BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3284 fp->eth_q_stats.rx_soft_errors++;
3285 goto next_rx;
3286 }
3287
3288 len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3289 lenonbd = le16toh(cqe_fp->len_on_bd);
3290 pad = cqe_fp->placement_offset;
3291
3292 m = rx_buf->m;
3293
3294 if (__predict_false(m == NULL)) {
3295 BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3296 bd_cons, fp->index);
3297 goto next_rx;
3298 }
3299
3300 /* XXX double copy if packet length under a threshold */
3301
3302 /*
3303 * If all the buffer descriptors are filled with mbufs then fill in
3304 * the current consumer index with a new BD. Else if a maximum Rx
3305 * buffer limit is imposed then fill in the next producer index.
3306 */
3307 rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3308 (sc->max_rx_bufs != RX_BD_USABLE) ?
3309 bd_prod : bd_cons);
3310 if (rc != 0) {
3311
3312 /* we simply reuse the received mbuf and don't post it to the stack */
3313 m = NULL;
3314
3315 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3316 fp->index, rc);
3317 fp->eth_q_stats.rx_soft_errors++;
3318
3319 if (sc->max_rx_bufs != RX_BD_USABLE) {
3320 /* copy this consumer index to the producer index */
3321 memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3322 sizeof(struct bxe_sw_rx_bd));
3323 memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3324 }
3325
3326 goto next_rx;
3327 }
3328
3329 /* current mbuf was detached from the bd */
3330 fp->eth_q_stats.mbuf_alloc_rx--;
3331
3332 /* we allocated a replacement mbuf, fixup the current one */
3333 m_adj(m, pad);
3334 m->m_pkthdr.len = m->m_len = len;
3335
3336 if ((len > 60) && (len > lenonbd)) {
3337 fp->eth_q_stats.rx_bxe_service_rxsgl++;
3338 rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3339 if (rc)
3340 break;
3341 fp->eth_q_stats.rx_jumbo_sge_pkts++;
3342 } else if (lenonbd < len) {
3343 fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3344 }
3345
3346 /* assign packet to this interface */
3347 if_setrcvif(m, ifp);
3348
3349 /* assume no hardware checksum has completed */
3350 m->m_pkthdr.csum_flags = 0;
3351
3352 /* validate checksum if offload enabled */
3353 if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
3354 /* check for a valid IP frame */
3355 if (!(cqe->fast_path_cqe.status_flags &
3356 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3357 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3358 if (__predict_false(cqe_fp_flags &
3359 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3360 fp->eth_q_stats.rx_hw_csum_errors++;
3361 } else {
3362 fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3363 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3364 }
3365 }
3366
3367 /* check for a valid TCP/UDP frame */
3368 if (!(cqe->fast_path_cqe.status_flags &
3369 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3370 if (__predict_false(cqe_fp_flags &
3371 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3372 fp->eth_q_stats.rx_hw_csum_errors++;
3373 } else {
3374 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3375 m->m_pkthdr.csum_data = 0xFFFF;
3376 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3377 CSUM_PSEUDO_HDR);
3378 }
3379 }
3380 }
3381
3382 /* if there is a VLAN tag then flag that info */
3383 if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3384 m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3385 m->m_flags |= M_VLANTAG;
3386 }
3387
3388 /* specify what RSS queue was used for this flow */
3389 m->m_pkthdr.flowid = fp->index;
3390 BXE_SET_FLOWID(m);
3391
3392 next_rx:
3393
3394 bd_cons = RX_BD_NEXT(bd_cons);
3395 bd_prod = RX_BD_NEXT(bd_prod);
3396 bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3397
3398 /* pass the frame to the stack */
3399 if (__predict_true(m != NULL)) {
3400 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3401 rx_pkts++;
3402 if_input(ifp, m);
3403 }
3404
3405 next_cqe:
3406
3407 sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3408 sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3409
3410 /* limit spinning on the queue */
3411 if (rc != 0)
3412 break;
3413
3414 if (rx_pkts == sc->rx_budget) {
3415 fp->eth_q_stats.rx_budget_reached++;
3416 break;
3417 }
3418 } /* while work to do */
3419
3420 fp->rx_bd_cons = bd_cons;
3421 fp->rx_bd_prod = bd_prod_fw;
3422 fp->rx_cq_cons = sw_cq_cons;
3423 fp->rx_cq_prod = sw_cq_prod;
3424
3425 /* Update producers */
3426 bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3427
3428 fp->eth_q_stats.rx_pkts += rx_pkts;
3429 fp->eth_q_stats.rx_calls++;
3430
3431 BXE_FP_RX_UNLOCK(fp);
3432
3433 return (sw_cq_cons != hw_cq_cons);
3434 }
3435
3436 static uint16_t
3437 bxe_free_tx_pkt(struct bxe_softc *sc,
3438 struct bxe_fastpath *fp,
3439 uint16_t idx)
3440 {
3441 struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3442 struct eth_tx_start_bd *tx_start_bd;
3443 uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3444 uint16_t new_cons;
3445 int nbd;
3446
3447 /* unmap the mbuf from non-paged memory */
3448 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3449
3450 tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3451 nbd = le16toh(tx_start_bd->nbd) - 1;
3452
3453 new_cons = (tx_buf->first_bd + nbd);
3454
3455 /* free the mbuf */
3456 if (__predict_true(tx_buf->m != NULL)) {
3457 m_freem(tx_buf->m);
3458 fp->eth_q_stats.mbuf_alloc_tx--;
3459 } else {
3460 fp->eth_q_stats.tx_chain_lost_mbuf++;
3461 }
3462
3463 tx_buf->m = NULL;
3464 tx_buf->first_bd = 0;
3465
3466 return (new_cons);
3467 }
3468
3469 /* transmit timeout watchdog */
3470 static int
3471 bxe_watchdog(struct bxe_softc *sc,
3472 struct bxe_fastpath *fp)
3473 {
3474 BXE_FP_TX_LOCK(fp);
3475
3476 if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3477 BXE_FP_TX_UNLOCK(fp);
3478 return (0);
3479 }
3480
3481 BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3482
3483 BXE_FP_TX_UNLOCK(fp);
3484 BXE_SET_ERROR_BIT(sc, BXE_ERR_TXQ_STUCK);
3485 taskqueue_enqueue_timeout(taskqueue_thread,
3486 &sc->sp_err_timeout_task, hz/10);
3487
3488 return (-1);
3489 }
3490
3491 /* processes transmit completions */
3492 static uint8_t
3493 bxe_txeof(struct bxe_softc *sc,
3494 struct bxe_fastpath *fp)
3495 {
3496 if_t ifp = sc->ifp;
3497 uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3498 uint16_t tx_bd_avail;
3499
3500 BXE_FP_TX_LOCK_ASSERT(fp);
3501
3502 bd_cons = fp->tx_bd_cons;
3503 hw_cons = le16toh(*fp->tx_cons_sb);
3504 sw_cons = fp->tx_pkt_cons;
3505
3506 while (sw_cons != hw_cons) {
3507 pkt_cons = TX_BD(sw_cons);
3508
3509 BLOGD(sc, DBG_TX,
3510 "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3511 fp->index, hw_cons, sw_cons, pkt_cons);
3512
3513 bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3514
3515 sw_cons++;
3516 }
3517
3518 fp->tx_pkt_cons = sw_cons;
3519 fp->tx_bd_cons = bd_cons;
3520
3521 BLOGD(sc, DBG_TX,
3522 "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3523 fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3524
3525 mb();
3526
3527 tx_bd_avail = bxe_tx_avail(sc, fp);
3528
3529 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3530 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
3531 } else {
3532 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3533 }
3534
3535 if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3536 /* reset the watchdog timer if there are pending transmits */
3537 fp->watchdog_timer = BXE_TX_TIMEOUT;
3538 return (TRUE);
3539 } else {
3540 /* clear watchdog when there are no pending transmits */
3541 fp->watchdog_timer = 0;
3542 return (FALSE);
3543 }
3544 }
3545
3546 static void
3547 bxe_drain_tx_queues(struct bxe_softc *sc)
3548 {
3549 struct bxe_fastpath *fp;
3550 int i, count;
3551
3552 /* wait until all TX fastpath tasks have completed */
3553 for (i = 0; i < sc->num_queues; i++) {
3554 fp = &sc->fp[i];
3555
3556 count = 1000;
3557
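        /*
         * with count = 1000 and DELAY(1000) usec per iteration this allows
         * roughly one second per queue before declaring a drain failure
         */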
3558 while (bxe_has_tx_work(fp)) {
3559
3560 BXE_FP_TX_LOCK(fp);
3561 bxe_txeof(sc, fp);
3562 BXE_FP_TX_UNLOCK(fp);
3563
3564 if (count == 0) {
3565 BLOGE(sc, "Timeout waiting for fp[%d] "
3566 "transmits to complete!\n", i);
3567 bxe_panic(sc, ("tx drain failure\n"));
3568 return;
3569 }
3570
3571 count--;
3572 DELAY(1000);
3573 rmb();
3574 }
3575 }
3576
3577 return;
3578 }
3579
3580 static int
3581 bxe_del_all_macs(struct bxe_softc *sc,
3582 struct ecore_vlan_mac_obj *mac_obj,
3583 int mac_type,
3584 uint8_t wait_for_comp)
3585 {
3586 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3587 int rc;
3588
3589 /* wait for completion of the requested command */
3590 if (wait_for_comp) {
3591 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3592 }
3593
3594 /* Set the mac type of addresses we want to clear */
3595 bxe_set_bit(mac_type, &vlan_mac_flags);
3596
3597 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3598 if (rc < 0) {
3599 BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
3600 rc, mac_type, wait_for_comp);
3601 }
3602
3603 return (rc);
3604 }
3605
3606 static int
3607 bxe_fill_accept_flags(struct bxe_softc *sc,
3608 uint32_t rx_mode,
3609 unsigned long *rx_accept_flags,
3610 unsigned long *tx_accept_flags)
3611 {
3612 /* Clear the flags first */
3613 *rx_accept_flags = 0;
3614 *tx_accept_flags = 0;
3615
3616 switch (rx_mode) {
3617 case BXE_RX_MODE_NONE:
3618 /*
3619 * 'drop all' supersedes any accept flags that may have been
3620 * passed to the function.
3621 */
3622 break;
3623
3624 case BXE_RX_MODE_NORMAL:
3625 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3626 bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3627 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3628
3629 /* internal switching mode */
3630 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3631 bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3632 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3633
3634 break;
3635
3636 case BXE_RX_MODE_ALLMULTI:
3637 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3638 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3639 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3640
3641 /* internal switching mode */
3642 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3643 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3644 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3645
3646 break;
3647
3648 case BXE_RX_MODE_PROMISC:
3649 /*
3650      * According to the definition of SI mode, an iface in promisc mode
3651 * should receive matched and unmatched (in resolution of port)
3652 * unicast packets.
3653 */
3654 bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3655 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3656 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3657 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3658
3659 /* internal switching mode */
3660 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3661 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3662
3663 if (IS_MF_SI(sc)) {
3664 bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3665 } else {
3666 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3667 }
3668
3669 break;
3670
3671 default:
3672 BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
3673 return (-1);
3674 }
3675
3676 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3677 if (rx_mode != BXE_RX_MODE_NONE) {
3678 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3679 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3680 }
3681
3682 return (0);
3683 }
3684
3685 static int
3686 bxe_set_q_rx_mode(struct bxe_softc *sc,
3687 uint8_t cl_id,
3688 unsigned long rx_mode_flags,
3689 unsigned long rx_accept_flags,
3690 unsigned long tx_accept_flags,
3691 unsigned long ramrod_flags)
3692 {
3693 struct ecore_rx_mode_ramrod_params ramrod_param;
3694 int rc;
3695
3696 memset(&ramrod_param, 0, sizeof(ramrod_param));
3697
3698 /* Prepare ramrod parameters */
3699 ramrod_param.cid = 0;
3700 ramrod_param.cl_id = cl_id;
3701 ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3702 ramrod_param.func_id = SC_FUNC(sc);
3703
3704 ramrod_param.pstate = &sc->sp_state;
3705 ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3706
3707 ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3708 ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3709
3710 bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3711
3712 ramrod_param.ramrod_flags = ramrod_flags;
3713 ramrod_param.rx_mode_flags = rx_mode_flags;
3714
3715 ramrod_param.rx_accept_flags = rx_accept_flags;
3716 ramrod_param.tx_accept_flags = tx_accept_flags;
3717
3718 rc = ecore_config_rx_mode(sc, &ramrod_param);
3719 if (rc < 0) {
3720 BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
3721 "rx_accept_flags 0x%x tx_accept_flags 0x%x "
3722 "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
3723 (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
3724 (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
3725 return (rc);
3726 }
3727
3728 return (0);
3729 }
3730
3731 static int
3732 bxe_set_storm_rx_mode(struct bxe_softc *sc)
3733 {
3734 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3735 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3736 int rc;
3737
3738 rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3739 &tx_accept_flags);
3740 if (rc) {
3741 return (rc);
3742 }
3743
3744 bxe_set_bit(RAMROD_RX, &ramrod_flags);
3745 bxe_set_bit(RAMROD_TX, &ramrod_flags);
3746
3747 /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
3748 return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3749 rx_accept_flags, tx_accept_flags,
3750 ramrod_flags));
3751 }
3752
3753 /* returns the "mcp load_code" according to global load_count array */
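/*
 * Example: on an otherwise idle path the first function to load sees
 * load_count[path][0] become 1 and gets FW_MSG_CODE_DRV_LOAD_COMMON, the
 * first function on the other port gets FW_MSG_CODE_DRV_LOAD_PORT, and any
 * additional function on an already loaded port gets
 * FW_MSG_CODE_DRV_LOAD_FUNCTION. bxe_nic_unload_no_mcp() below mirrors this
 * with the UNLOAD codes as the counts drop back to zero.
 */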
3754 static int
3755 bxe_nic_load_no_mcp(struct bxe_softc *sc)
3756 {
3757 int path = SC_PATH(sc);
3758 int port = SC_PORT(sc);
3759
3760 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
3761 path, load_count[path][0], load_count[path][1],
3762 load_count[path][2]);
3763 load_count[path][0]++;
3764 load_count[path][1 + port]++;
3765 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
3766 path, load_count[path][0], load_count[path][1],
3767 load_count[path][2]);
3768 if (load_count[path][0] == 1) {
3769 return (FW_MSG_CODE_DRV_LOAD_COMMON);
3770 } else if (load_count[path][1 + port] == 1) {
3771 return (FW_MSG_CODE_DRV_LOAD_PORT);
3772 } else {
3773 return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
3774 }
3775 }
3776
3777 /* returns the "mcp load_code" according to global load_count array */
3778 static int
3779 bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3780 {
3781 int port = SC_PORT(sc);
3782 int path = SC_PATH(sc);
3783
3784 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
3785 path, load_count[path][0], load_count[path][1],
3786 load_count[path][2]);
3787 load_count[path][0]--;
3788 load_count[path][1 + port]--;
3789 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
3790 path, load_count[path][0], load_count[path][1],
3791 load_count[path][2]);
3792 if (load_count[path][0] == 0) {
3793 return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3794 } else if (load_count[path][1 + port] == 0) {
3795 return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3796 } else {
3797 return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3798 }
3799 }
3800
3801 /* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3802 static uint32_t
3803 bxe_send_unload_req(struct bxe_softc *sc,
3804 int unload_mode)
3805 {
3806 uint32_t reset_code = 0;
3807
3808 /* Select the UNLOAD request mode */
3809 if (unload_mode == UNLOAD_NORMAL) {
3810 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3811 } else {
3812 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3813 }
3814
3815 /* Send the request to the MCP */
3816 if (!BXE_NOMCP(sc)) {
3817 reset_code = bxe_fw_command(sc, reset_code, 0);
3818 } else {
3819 reset_code = bxe_nic_unload_no_mcp(sc);
3820 }
3821
3822 return (reset_code);
3823 }
3824
3825 /* send UNLOAD_DONE command to the MCP */
3826 static void
3827 bxe_send_unload_done(struct bxe_softc *sc,
3828 uint8_t keep_link)
3829 {
3830 uint32_t reset_param =
3831 keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3832
3833 /* Report UNLOAD_DONE to MCP */
3834 if (!BXE_NOMCP(sc)) {
3835 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
3836 }
3837 }
3838
3839 static int
3840 bxe_func_wait_started(struct bxe_softc *sc)
3841 {
3842 int tout = 50;
3843
3844 if (!sc->port.pmf) {
3845 return (0);
3846 }
3847
3848 /*
3849 * (assumption: No Attention from MCP at this stage)
3850 * PMF probably in the middle of TX disable/enable transaction
3851 * 1. Sync ISR for default SB
3852 * 2. Sync SP queue - this guarantees that attention handling has started
3853 * 3. Wait until the TX disable/enable transaction completes
3854 *
3855 * 1+2 guarantee that if DCBX attention was scheduled it already changed
3856 * pending bit of transaction from STARTED-->TX_STOPPED, if we already
3857 * received completion for the transaction the state is TX_STOPPED.
3858 * State will return to STARTED after completion of TX_STOPPED-->STARTED
3859 * transaction.
3860 */
3861
3862 /* XXX make sure default SB ISR is done */
3863 /* need a way to synchronize an irq (intr_mtx?) */
3864
3865 /* XXX flush any work queues */
3866
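    /* poll for up to 50 iterations of DELAY(20000), i.e. ~1 second */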
3867 while (ecore_func_get_state(sc, &sc->func_obj) !=
3868 ECORE_F_STATE_STARTED && tout--) {
3869 DELAY(20000);
3870 }
3871
3872 if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3873 /*
3874 * Failed to complete the transaction in a "good way"
3875 * Force both transactions with CLR bit.
3876 */
3877 struct ecore_func_state_params func_params = { NULL };
3878
3879 BLOGE(sc, "Unexpected function state! "
3880 "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3881
3882 func_params.f_obj = &sc->func_obj;
3883 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3884
3885 /* STARTED-->TX_STOPPED */
3886 func_params.cmd = ECORE_F_CMD_TX_STOP;
3887 ecore_func_state_change(sc, &func_params);
3888
3889 /* TX_STOPPED-->STARTED */
3890 func_params.cmd = ECORE_F_CMD_TX_START;
3891 return (ecore_func_state_change(sc, &func_params));
3892 }
3893
3894 return (0);
3895 }
3896
3897 static int
3898 bxe_stop_queue(struct bxe_softc *sc,
3899 int index)
3900 {
3901 struct bxe_fastpath *fp = &sc->fp[index];
3902 struct ecore_queue_state_params q_params = { NULL };
3903 int rc;
3904
3905 BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3906
3907 q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3908 /* We want to wait for completion in this context */
3909 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3910
3911 /* Stop the primary connection: */
3912
3913 /* ...halt the connection */
3914 q_params.cmd = ECORE_Q_CMD_HALT;
3915 rc = ecore_queue_state_change(sc, &q_params);
3916 if (rc) {
3917 return (rc);
3918 }
3919
3920 /* ...terminate the connection */
3921 q_params.cmd = ECORE_Q_CMD_TERMINATE;
3922 memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
3923 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
3924 rc = ecore_queue_state_change(sc, &q_params);
3925 if (rc) {
3926 return (rc);
3927 }
3928
3929 /* ...delete cfc entry */
3930 q_params.cmd = ECORE_Q_CMD_CFC_DEL;
3931 memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
3932 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
3933 return (ecore_queue_state_change(sc, &q_params));
3934 }
3935
3936 /* wait for the outstanding SP commands */
3937 static inline uint8_t
3938 bxe_wait_sp_comp(struct bxe_softc *sc,
3939 unsigned long mask)
3940 {
3941 unsigned long tmp;
3942 int tout = 5000; /* wait for 5 secs tops */
3943
3944 while (tout--) {
3945 mb();
3946 if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3947 return (TRUE);
3948 }
3949
3950 DELAY(1000);
3951 }
3952
3953 mb();
3954
3955 tmp = atomic_load_acq_long(&sc->sp_state);
3956 if (tmp & mask) {
3957 BLOGE(sc, "Filtering completion timed out: "
3958 "sp_state 0x%lx, mask 0x%lx\n",
3959 tmp, mask);
3960 return (FALSE);
3961 }
3962
3963 return (FALSE);
3964 }
3965
3966 static int
3967 bxe_func_stop(struct bxe_softc *sc)
3968 {
3969 struct ecore_func_state_params func_params = { NULL };
3970 int rc;
3971
3972 /* prepare parameters for function state transitions */
3973 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3974 func_params.f_obj = &sc->func_obj;
3975 func_params.cmd = ECORE_F_CMD_STOP;
3976
3977 /*
3978 * Try to stop the function the 'good way'. If it fails (in case
3979 * of a parity error during bxe_chip_cleanup()) and we are
3980 * not in a debug mode, perform a state transaction in order to
3981 * enable further HW_RESET transaction.
3982 */
3983 rc = ecore_func_state_change(sc, &func_params);
3984 if (rc) {
3985 BLOGE(sc, "FUNC_STOP ramrod failed. "
3986 "Running a dry transaction (%d)\n", rc);
3987 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3988 return (ecore_func_state_change(sc, &func_params));
3989 }
3990
3991 return (0);
3992 }
3993
3994 static int
3995 bxe_reset_hw(struct bxe_softc *sc,
3996 uint32_t load_code)
3997 {
3998 struct ecore_func_state_params func_params = { NULL };
3999
4000 /* Prepare parameters for function state transitions */
4001 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
4002
4003 func_params.f_obj = &sc->func_obj;
4004 func_params.cmd = ECORE_F_CMD_HW_RESET;
4005
4006 func_params.params.hw_init.load_phase = load_code;
4007
4008 return (ecore_func_state_change(sc, &func_params));
4009 }
4010
4011 static void
4012 bxe_int_disable_sync(struct bxe_softc *sc,
4013 int disable_hw)
4014 {
4015 if (disable_hw) {
4016 /* prevent the HW from sending interrupts */
4017 bxe_int_disable(sc);
4018 }
4019
4020 /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
4021 /* make sure all ISRs are done */
4022
4023 /* XXX make sure sp_task is not running */
4024 /* cancel and flush work queues */
4025 }
4026
4027 static void
4028 bxe_chip_cleanup(struct bxe_softc *sc,
4029 uint32_t unload_mode,
4030 uint8_t keep_link)
4031 {
4032 int port = SC_PORT(sc);
4033 struct ecore_mcast_ramrod_params rparam = { NULL };
4034 uint32_t reset_code;
4035 int i, rc = 0;
4036
4037 bxe_drain_tx_queues(sc);
4038
4039 /* give HW time to discard old tx messages */
4040 DELAY(1000);
4041
4042 /* Clean all ETH MACs */
4043 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4044 if (rc < 0) {
4045 BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4046 }
4047
4048 /* Clean up UC list */
4049 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4050 if (rc < 0) {
4051 BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4052 }
4053
4054 /* Disable LLH */
4055 if (!CHIP_IS_E1(sc)) {
4056 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4057 }
4058
4059 /* Set "drop all" to stop Rx */
4060
4061 /*
4062 * We need to take the BXE_MCAST_LOCK() here in order to prevent
4063 * a race between the completion code and this code.
4064 */
4065 BXE_MCAST_LOCK(sc);
4066
4067 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4068 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4069 } else {
4070 bxe_set_storm_rx_mode(sc);
4071 }
4072
4073 /* Clean up multicast configuration */
4074 rparam.mcast_obj = &sc->mcast_obj;
4075 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4076 if (rc < 0) {
4077 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4078 }
4079
4080 BXE_MCAST_UNLOCK(sc);
4081
4082 // XXX bxe_iov_chip_cleanup(sc);
4083
4084 /*
4085 * Send the UNLOAD_REQUEST to the MCP. This will return whether
4086 * this function should perform a FUNCTION, PORT, or COMMON HW
4087 * reset.
4088 */
4089 reset_code = bxe_send_unload_req(sc, unload_mode);
4090
4091 /*
4092 * (assumption: No Attention from MCP at this stage)
4093 * PMF probably in the middle of TX disable/enable transaction
4094 */
4095 rc = bxe_func_wait_started(sc);
4096 if (rc) {
4097 BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
4098 }
4099
4100 /*
4101 * Close multi and leading connections
4102 * Completions for ramrods are collected in a synchronous way
4103 */
4104 for (i = 0; i < sc->num_queues; i++) {
4105 if (bxe_stop_queue(sc, i)) {
4106 goto unload_error;
4107 }
4108 }
4109
4110 /*
4111 * If the SP settings did not complete by now, something
4112 * very wrong has happened.
4113 */
4114 if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4115 BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4116 }
4117
4118 unload_error:
4119
4120 rc = bxe_func_stop(sc);
4121 if (rc) {
4122 BLOGE(sc, "Function stop failed!(%d)\n", rc);
4123 }
4124
4125 /* disable HW interrupts */
4126 bxe_int_disable_sync(sc, TRUE);
4127
4128 /* detach interrupts */
4129 bxe_interrupt_detach(sc);
4130
4131 /* Reset the chip */
4132 rc = bxe_reset_hw(sc, reset_code);
4133 if (rc) {
4134 BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4135 }
4136
4137 /* Report UNLOAD_DONE to MCP */
4138 bxe_send_unload_done(sc, keep_link);
4139 }
4140
4141 static void
4142 bxe_disable_close_the_gate(struct bxe_softc *sc)
4143 {
4144 uint32_t val;
4145 int port = SC_PORT(sc);
4146
4147 BLOGD(sc, DBG_LOAD,
4148 "Disabling 'close the gates'\n");
4149
4150 if (CHIP_IS_E1(sc)) {
4151 uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4152 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4153 val = REG_RD(sc, addr);
4154 val &= ~(0x300);
4155 REG_WR(sc, addr, val);
4156 } else {
4157 val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4158 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4159 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4160 REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4161 }
4162 }
4163
4164 /*
4165 * Cleans the objects that have internal lists without sending
4166 * ramrods. Should be run when interrupts are disabled.
4167 */
4168 static void
4169 bxe_squeeze_objects(struct bxe_softc *sc)
4170 {
4171 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4172 struct ecore_mcast_ramrod_params rparam = { NULL };
4173 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4174 int rc;
4175
4176 /* Cleanup MACs' object first... */
4177
4178 /* Wait for completion of the requested commands */
4179 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4180 /* Perform a dry cleanup */
4181 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4182
4183 /* Clean ETH primary MAC */
4184 bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4185 rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4186 &ramrod_flags);
4187 if (rc != 0) {
4188 BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4189 }
4190
4191 /* Cleanup UC list */
4192 vlan_mac_flags = 0;
4193 bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4194 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4195 &ramrod_flags);
4196 if (rc != 0) {
4197 BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4198 }
4199
4200 /* Now clean mcast object... */
4201
4202 rparam.mcast_obj = &sc->mcast_obj;
4203 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4204
4205 /* Add a DEL command... */
4206 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4207 if (rc < 0) {
4208 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4209 }
4210
4211 /* now wait until all pending commands are cleared */
4212
4213 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4214 while (rc != 0) {
4215 if (rc < 0) {
4216 BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4217 return;
4218 }
4219
4220 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4221 }
4222 }
4223
4224 /* stop the controller */
4225 static __noinline int
4226 bxe_nic_unload(struct bxe_softc *sc,
4227 uint32_t unload_mode,
4228 uint8_t keep_link)
4229 {
4230 uint8_t global = FALSE;
4231 uint32_t val;
4232 int i;
4233
4234 BXE_CORE_LOCK_ASSERT(sc);
4235
4236 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
4237
4238 for (i = 0; i < sc->num_queues; i++) {
4239 struct bxe_fastpath *fp;
4240
4241 fp = &sc->fp[i];
4242 fp->watchdog_timer = 0;
4243 BXE_FP_TX_LOCK(fp);
4244 BXE_FP_TX_UNLOCK(fp);
4245 }
4246
4247 BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4248
4249 /* mark driver as unloaded in shmem2 */
4250 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4251 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4252 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4253 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4254 }
4255
4256 if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4257 (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4258
4259 if(CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
4260 /*
4261 * We can get here if the driver has been unloaded
4262 * during parity error recovery and is either waiting for a
4263 * leader to complete or for other functions to unload and
4264 * then ifconfig down has been issued. In this case we want to
4265 * unload and let the other functions complete the recovery
4266 * process.
4267 */
4268 sc->recovery_state = BXE_RECOVERY_DONE;
4269 sc->is_leader = 0;
4270 bxe_release_leader_lock(sc);
4271 mb();
4272 BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4273 }
4274 BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x"
4275 " state = 0x%x\n", sc->recovery_state, sc->state);
4276 return (-1);
4277 }
4278
4279 /*
4280 * Nothing to do during unload if previous bxe_nic_load()
4281 * did not complete successfully - all resources are released.
4282 */
4283 if ((sc->state == BXE_STATE_CLOSED) ||
4284 (sc->state == BXE_STATE_ERROR)) {
4285 return (0);
4286 }
4287
4288 sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4289 mb();
4290
4291 /* stop tx */
4292 bxe_tx_disable(sc);
4293
4294 sc->rx_mode = BXE_RX_MODE_NONE;
4295 /* XXX set rx mode ??? */
4296
4297 if (IS_PF(sc) && !sc->grcdump_done) {
4298 /* set ALWAYS_ALIVE bit in shmem */
4299 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4300
4301 bxe_drv_pulse(sc);
4302
4303 bxe_stats_handle(sc, STATS_EVENT_STOP);
4304 bxe_save_statistics(sc);
4305 }
4306
4307 /* wait till consumers catch up with producers in all queues */
4308 bxe_drain_tx_queues(sc);
4309
4310 /* if VF, indicate to PF that this function is going down (PF will delete
4311 * sp elements and clear initializations)
4312 */
4313 if (IS_VF(sc)) {
4314 ; /* bxe_vfpf_close_vf(sc); */
4315 } else if (unload_mode != UNLOAD_RECOVERY) {
4316 /* if this is a normal/close unload need to clean up chip */
4317 if (!sc->grcdump_done)
4318 bxe_chip_cleanup(sc, unload_mode, keep_link);
4319 } else {
4320 /* Send the UNLOAD_REQUEST to the MCP */
4321 bxe_send_unload_req(sc, unload_mode);
4322
4323 /*
4324 * Prevent transactions to the host from the functions on the
4325 * engine that doesn't reset global blocks in case of a global
4326 * attention once global blocks are reset and gates are opened
4327 * (the engine whose leader will perform the recovery
4328 * last).
4329 */
4330 if (!CHIP_IS_E1x(sc)) {
4331 bxe_pf_disable(sc);
4332 }
4333
4334 /* disable HW interrupts */
4335 bxe_int_disable_sync(sc, TRUE);
4336
4337 /* detach interrupts */
4338 bxe_interrupt_detach(sc);
4339
4340 /* Report UNLOAD_DONE to MCP */
4341 bxe_send_unload_done(sc, FALSE);
4342 }
4343
4344 /*
4345 * At this stage no more interrupts will arrive so we may safely clean
4346 * the queue'able objects here in case they failed to get cleaned so far.
4347 */
4348 if (IS_PF(sc)) {
4349 bxe_squeeze_objects(sc);
4350 }
4351
4352 /* There should be no more pending SP commands at this stage */
4353 sc->sp_state = 0;
4354
4355 sc->port.pmf = 0;
4356
4357 bxe_free_fp_buffers(sc);
4358
4359 if (IS_PF(sc)) {
4360 bxe_free_mem(sc);
4361 }
4362
4363 bxe_free_fw_stats_mem(sc);
4364
4365 sc->state = BXE_STATE_CLOSED;
4366
4367 /*
4368 * Check if there are pending parity attentions. If there are - set
4369 * RECOVERY_IN_PROGRESS.
4370 */
4371 if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4372 bxe_set_reset_in_progress(sc);
4373
4374 /* Set RESET_IS_GLOBAL if needed */
4375 if (global) {
4376 bxe_set_reset_global(sc);
4377 }
4378 }
4379
4380 /*
4381 * The last driver must disable a "close the gate" if there is no
4382 * parity attention or "process kill" pending.
4383 */
4384 if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4385 bxe_reset_is_done(sc, SC_PATH(sc))) {
4386 bxe_disable_close_the_gate(sc);
4387 }
4388
4389 BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4390
4391 bxe_link_report(sc);
4392
4393 return (0);
4394 }
4395
4396 /*
4397 * Called by the OS to set various media options (i.e. link, speed, etc.) when
4398 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4399 */
4400 static int
4401 bxe_ifmedia_update(struct ifnet *ifp)
4402 {
4403 struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
4404 struct ifmedia *ifm;
4405
4406 ifm = &sc->ifmedia;
4407
4408 /* We only support Ethernet media type. */
4409 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4410 return (EINVAL);
4411 }
4412
4413 switch (IFM_SUBTYPE(ifm->ifm_media)) {
4414 case IFM_AUTO:
4415 break;
4416 case IFM_10G_CX4:
4417 case IFM_10G_SR:
4418 case IFM_10G_T:
4419 case IFM_10G_TWINAX:
4420 default:
4421 /* We don't support changing the media type. */
4422 BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4423 IFM_SUBTYPE(ifm->ifm_media));
4424 return (EINVAL);
4425 }
4426
4427 return (0);
4428 }
4429
4430 /*
4431 * Called by the OS to get the current media status (i.e. link, speed, etc.).
4432 */
4433 static void
4434 bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
4435 {
4436 struct bxe_softc *sc = if_getsoftc(ifp);
4437
4438 /* Bug 165447: the 'ifconfig' tool skips printing of the "status: ..."
4439 line if the IFM_AVALID flag is *NOT* set. So we need to set this
4440 flag unconditionally (irrespective of the administrative
4441 'up/down' state of the interface) to ensure that the line is always
4442 displayed.
4443 */
4444 ifmr->ifm_status = IFM_AVALID;
4445
4446 /* Setup the default interface info. */
4447 ifmr->ifm_active = IFM_ETHER;
4448
4449 /* Report link down if the driver isn't running. */
4450 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4451 ifmr->ifm_active |= IFM_NONE;
4452 BLOGD(sc, DBG_PHY, "in %s : nic still not loaded fully\n", __func__);
4453 BLOGD(sc, DBG_PHY, "in %s : link_up (1) : %d\n",
4454 __func__, sc->link_vars.link_up);
4455 return;
4456 }
4457
4458
4459 if (sc->link_vars.link_up) {
4460 ifmr->ifm_status |= IFM_ACTIVE;
4461 ifmr->ifm_active |= IFM_FDX;
4462 } else {
4463 ifmr->ifm_active |= IFM_NONE;
4464 BLOGD(sc, DBG_PHY, "in %s : setting IFM_NONE\n",
4465 __func__);
4466 return;
4467 }
4468
4469 ifmr->ifm_active |= sc->media;
4470 return;
4471 }
4472
4473 static void
4474 bxe_handle_chip_tq(void *context,
4475 int pending)
4476 {
4477 struct bxe_softc *sc = (struct bxe_softc *)context;
4478 long work = atomic_load_acq_long(&sc->chip_tq_flags);
4479
4480 switch (work)
4481 {
4482
4483 case CHIP_TQ_REINIT:
4484 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
4485 /* restart the interface */
4486 BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4487 bxe_periodic_stop(sc);
4488 BXE_CORE_LOCK(sc);
4489 bxe_stop_locked(sc);
4490 bxe_init_locked(sc);
4491 BXE_CORE_UNLOCK(sc);
4492 }
4493 break;
4494
4495 default:
4496 break;
4497 }
4498 }
4499
4500 /*
4501 * Handles any IOCTL calls from the operating system.
4502 *
4503 * Returns:
4504 * 0 = Success, >0 Failure
4505 */
4506 static int
4507 bxe_ioctl(if_t ifp,
4508 u_long command,
4509 caddr_t data)
4510 {
4511 struct bxe_softc *sc = if_getsoftc(ifp);
4512 struct ifreq *ifr = (struct ifreq *)data;
4513 int mask = 0;
4514 int reinit = 0;
4515 int error = 0;
4516
4517 int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4518 int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4519
4520 switch (command)
4521 {
4522 case SIOCSIFMTU:
4523 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4524 ifr->ifr_mtu);
4525
4526 if (sc->mtu == ifr->ifr_mtu) {
4527 /* nothing to change */
4528 break;
4529 }
4530
4531 if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4532 BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4533 ifr->ifr_mtu, mtu_min, mtu_max);
4534 error = EINVAL;
4535 break;
4536 }
4537
4538 atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4539 (unsigned long)ifr->ifr_mtu);
4540 /*
4541 atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp),
4542 (unsigned long)ifr->ifr_mtu);
4543 XXX - Not sure why it needs to be atomic
4544 */
4545 if_setmtu(ifp, ifr->ifr_mtu);
4546 reinit = 1;
4547 break;
4548
4549 case SIOCSIFFLAGS:
4550 /* toggle the interface state up or down */
4551 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4552
4553 BXE_CORE_LOCK(sc);
4554 /* check if the interface is up */
4555 if (if_getflags(ifp) & IFF_UP) {
4556 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4557 /* set the receive mode flags */
4558 bxe_set_rx_mode(sc);
4559 } else if(sc->state != BXE_STATE_DISABLED) {
4560 bxe_init_locked(sc);
4561 }
4562 } else {
4563 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4564 bxe_periodic_stop(sc);
4565 bxe_stop_locked(sc);
4566 }
4567 }
4568 BXE_CORE_UNLOCK(sc);
4569
4570 break;
4571
4572 case SIOCADDMULTI:
4573 case SIOCDELMULTI:
4574 /* add/delete multicast addresses */
4575 BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4576
4577 /* check if the interface is up */
4578 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4579 /* set the receive mode flags */
4580 BXE_CORE_LOCK(sc);
4581 bxe_set_rx_mode(sc);
4582 BXE_CORE_UNLOCK(sc);
4583 }
4584
4585 break;
4586
4587 case SIOCSIFCAP:
4588 /* find out which capabilities have changed */
4589 mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));
4590
4591 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4592 mask);
4593
4594 /* toggle the LRO capabilities enable flag */
4595 if (mask & IFCAP_LRO) {
4596 if_togglecapenable(ifp, IFCAP_LRO);
4597 BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4598 (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
4599 reinit = 1;
4600 }
4601
4602 /* toggle the TXCSUM checksum capabilities enable flag */
4603 if (mask & IFCAP_TXCSUM) {
4604 if_togglecapenable(ifp, IFCAP_TXCSUM);
4605 BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4606 (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
4607 if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
4608 if_sethwassistbits(ifp, (CSUM_IP |
4609 CSUM_TCP |
4610 CSUM_UDP |
4611 CSUM_TSO |
4612 CSUM_TCP_IPV6 |
4613 CSUM_UDP_IPV6), 0);
4614 } else {
4615 if_clearhwassist(ifp); /* XXX */
4616 }
4617 }
4618
4619 /* toggle the RXCSUM checksum capabilities enable flag */
4620 if (mask & IFCAP_RXCSUM) {
4621 if_togglecapenable(ifp, IFCAP_RXCSUM);
4622 BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4623 (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
4624 if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
4625 if_sethwassistbits(ifp, (CSUM_IP |
4626 CSUM_TCP |
4627 CSUM_UDP |
4628 CSUM_TSO |
4629 CSUM_TCP_IPV6 |
4630 CSUM_UDP_IPV6), 0);
4631 } else {
4632 if_clearhwassist(ifp); /* XXX */
4633 }
4634 }
4635
4636 /* toggle TSO4 capabilities enabled flag */
4637 if (mask & IFCAP_TSO4) {
4638 if_togglecapenable(ifp, IFCAP_TSO4);
4639 BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4640 (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
4641 }
4642
4643 /* toggle TSO6 capabilities enabled flag */
4644 if (mask & IFCAP_TSO6) {
4645 if_togglecapenable(ifp, IFCAP_TSO6);
4646 BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4647 (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF");
4648 }
4649
4650 /* toggle VLAN_HWTSO capabilities enabled flag */
4651 if (mask & IFCAP_VLAN_HWTSO) {
4652
4653 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
4654 BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4655 (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4656 }
4657
4658 /* toggle VLAN_HWCSUM capabilities enabled flag */
4659 if (mask & IFCAP_VLAN_HWCSUM) {
4660 /* XXX investigate this... */
4661 BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4662 error = EINVAL;
4663 }
4664
4665 /* toggle VLAN_MTU capabilities enable flag */
4666 if (mask & IFCAP_VLAN_MTU) {
4667 /* XXX investigate this... */
4668 BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4669 error = EINVAL;
4670 }
4671
4672 /* toggle VLAN_HWTAGGING capabilities enabled flag */
4673 if (mask & IFCAP_VLAN_HWTAGGING) {
4674 /* XXX investigate this... */
4675 BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4676 error = EINVAL;
4677 }
4678
4679 /* toggle VLAN_HWFILTER capabilities enabled flag */
4680 if (mask & IFCAP_VLAN_HWFILTER) {
4681 /* XXX investigate this... */
4682 BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4683 error = EINVAL;
4684 }
4685
4686 /* XXX not yet...
4687 * IFCAP_WOL_MAGIC
4688 */
4689
4690 break;
4691
4692 case SIOCSIFMEDIA:
4693 case SIOCGIFMEDIA:
4694 /* set/get interface media */
4695 BLOGD(sc, DBG_IOCTL,
4696 "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4697 (command & 0xff));
4698 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4699 break;
4700
4701 default:
4702 BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4703 (command & 0xff));
4704 error = ether_ioctl(ifp, command, data);
4705 break;
4706 }
4707
4708 if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
4709 BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4710 "Re-initializing hardware from IOCTL change\n");
4711 bxe_periodic_stop(sc);
4712 BXE_CORE_LOCK(sc);
4713 bxe_stop_locked(sc);
4714 bxe_init_locked(sc);
4715 BXE_CORE_UNLOCK(sc);
4716 }
4717
4718 return (error);
4719 }
4720
4721 static __noinline void
4722 bxe_dump_mbuf(struct bxe_softc *sc,
4723 struct mbuf *m,
4724 uint8_t contents)
4725 {
4726 char * type;
4727 int i = 0;
4728
4729 if (!(sc->debug & DBG_MBUF)) {
4730 return;
4731 }
4732
4733 if (m == NULL) {
4734 BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
4735 return;
4736 }
4737
4738 while (m) {
4739
4740 BLOGD(sc, DBG_MBUF,
4741 "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4742 i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4743
4744 if (m->m_flags & M_PKTHDR) {
4745 BLOGD(sc, DBG_MBUF,
4746 "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4747 i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4748 (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4749 }
4750
4751 if (m->m_flags & M_EXT) {
4752 switch (m->m_ext.ext_type) {
4753 case EXT_CLUSTER: type = "EXT_CLUSTER"; break;
4754 case EXT_SFBUF: type = "EXT_SFBUF"; break;
4755 case EXT_JUMBOP: type = "EXT_JUMBOP"; break;
4756 case EXT_JUMBO9: type = "EXT_JUMBO9"; break;
4757 case EXT_JUMBO16: type = "EXT_JUMBO16"; break;
4758 case EXT_PACKET: type = "EXT_PACKET"; break;
4759 case EXT_MBUF: type = "EXT_MBUF"; break;
4760 case EXT_NET_DRV: type = "EXT_NET_DRV"; break;
4761 case EXT_MOD_TYPE: type = "EXT_MOD_TYPE"; break;
4762 case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
4763 case EXT_EXTREF: type = "EXT_EXTREF"; break;
4764 default: type = "UNKNOWN"; break;
4765 }
4766
4767 BLOGD(sc, DBG_MBUF,
4768 "%02d: - m_ext: %p ext_size=%d type=%s\n",
4769 i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4770 }
4771
4772 if (contents) {
4773 bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
4774 }
4775
4776 m = m->m_next;
4777 i++;
4778 }
4779 }
4780
4781 /*
4782 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
4783 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
4784 * The window: the 3 reserved bds are 1 for the headers BD + 2 for the parse BD and last BD.
4785 * The headers come in a separate bd in FreeBSD, so 13 - 3 = 10.
4786 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
4787 */
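/*
 * Worked example: with an MSS of, say, 1460 bytes, a frame whose first 10
 * data segments after the header sum to only 1200 bytes fails the check
 * (returns 1) and is handed back to bxe_tx_encap() for m_defrag(); if every
 * 10-segment window sums to at least the MSS the frame is sent as-is.
 */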
4788 static int
4789 bxe_chktso_window(struct bxe_softc *sc,
4790 int nsegs,
4791 bus_dma_segment_t *segs,
4792 struct mbuf *m)
4793 {
4794 uint32_t num_wnds, wnd_size, wnd_sum;
4795 int32_t frag_idx, wnd_idx;
4796 unsigned short lso_mss;
4797 int defrag;
4798
4799 defrag = 0;
4800 wnd_sum = 0;
4801 wnd_size = 10;
4802 num_wnds = nsegs - wnd_size;
4803 lso_mss = htole16(m->m_pkthdr.tso_segsz);
4804
4805 /*
4806 * The total header length (Eth+IP+TCP) is in the first FreeBSD mbuf, so
4807 * calculate the first window's sum of data while skipping the first
4808 * segment, assuming it holds the header.
4809 */
4810 for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
4811 wnd_sum += htole16(segs[frag_idx].ds_len);
4812 }
4813
4814 /* check the first 10 bd window size */
4815 if (wnd_sum < lso_mss) {
4816 return (1);
4817 }
4818
4819 /* run through the windows */
4820 for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
4821 /* subtract the segment that slides out of the previous window */
4822 wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
4823 /* add the next mbuf len to the len of our new window */
4824 wnd_sum += htole16(segs[frag_idx].ds_len);
4825 if (wnd_sum < lso_mss) {
4826 return (1);
4827 }
4828 }
4829
4830 return (0);
4831 }
4832
4833 static uint8_t
4834 bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
4835 struct mbuf *m,
4836 uint32_t *parsing_data)
4837 {
4838 struct ether_vlan_header *eh = NULL;
4839 struct ip *ip4 = NULL;
4840 struct ip6_hdr *ip6 = NULL;
4841 caddr_t ip = NULL;
4842 struct tcphdr *th = NULL;
4843 int e_hlen, ip_hlen, l4_off;
4844 uint16_t proto;
4845
4846 if (m->m_pkthdr.csum_flags == CSUM_IP) {
4847 /* no L4 checksum offload needed */
4848 return (0);
4849 }
4850
4851 /* get the Ethernet header */
4852 eh = mtod(m, struct ether_vlan_header *);
4853
4854 /* handle VLAN encapsulation if present */
4855 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4856 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4857 proto = ntohs(eh->evl_proto);
4858 } else {
4859 e_hlen = ETHER_HDR_LEN;
4860 proto = ntohs(eh->evl_encap_proto);
4861 }
4862
4863 switch (proto) {
4864 case ETHERTYPE_IP:
4865 /* get the IP header, if mbuf len < 20 then header in next mbuf */
4866 ip4 = (m->m_len < sizeof(struct ip)) ?
4867 (struct ip *)m->m_next->m_data :
4868 (struct ip *)(m->m_data + e_hlen);
4869 /* ip_hl is in 32-bit words; << 2 converts it to bytes */
4870 ip_hlen = (ip4->ip_hl << 2);
4871 ip = (caddr_t)ip4;
4872 break;
4873 case ETHERTYPE_IPV6:
4874 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4875 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4876 (struct ip6_hdr *)m->m_next->m_data :
4877 (struct ip6_hdr *)(m->m_data + e_hlen);
4878 /* XXX cannot support offload with IPv6 extensions */
4879 ip_hlen = sizeof(struct ip6_hdr);
4880 ip = (caddr_t)ip6;
4881 break;
4882 default:
4883 /* We can't offload in this case... */
4884 /* XXX error stat ??? */
4885 return (0);
4886 }
4887
4888 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4889 l4_off = (e_hlen + ip_hlen);
4890
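    /* the OFFSET_W field is expressed in 16-bit words, hence l4_off is shifted right by 1 */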
4891 *parsing_data |=
4892 (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
4893 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
4894
4895 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4896 CSUM_TSO |
4897 CSUM_TCP_IPV6)) {
4898 fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4899 th = (struct tcphdr *)(ip + ip_hlen);
4900 /* th_off is number of 32-bit words */
4901 *parsing_data |= ((th->th_off <<
4902 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
4903 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
4904 return (l4_off + (th->th_off << 2)); /* entire header length */
4905 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4906 CSUM_UDP_IPV6)) {
4907 fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4908 return (l4_off + sizeof(struct udphdr)); /* entire header length */
4909 } else {
4910 /* XXX error stat ??? */
4911 return (0);
4912 }
4913 }
4914
4915 static uint8_t
4916 bxe_set_pbd_csum(struct bxe_fastpath *fp,
4917 struct mbuf *m,
4918 struct eth_tx_parse_bd_e1x *pbd)
4919 {
4920 struct ether_vlan_header *eh = NULL;
4921 struct ip *ip4 = NULL;
4922 struct ip6_hdr *ip6 = NULL;
4923 caddr_t ip = NULL;
4924 struct tcphdr *th = NULL;
4925 struct udphdr *uh = NULL;
4926 int e_hlen, ip_hlen;
4927 uint16_t proto;
4928 uint8_t hlen;
4929 uint16_t tmp_csum;
4930 uint32_t *tmp_uh;
4931
4932 /* get the Ethernet header */
4933 eh = mtod(m, struct ether_vlan_header *);
4934
4935 /* handle VLAN encapsulation if present */
4936 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4937 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4938 proto = ntohs(eh->evl_proto);
4939 } else {
4940 e_hlen = ETHER_HDR_LEN;
4941 proto = ntohs(eh->evl_encap_proto);
4942 }
4943
4944 switch (proto) {
4945 case ETHERTYPE_IP:
4946 /* get the IP header, if mbuf len < 20 then header in next mbuf */
4947 ip4 = (m->m_len < sizeof(struct ip)) ?
4948 (struct ip *)m->m_next->m_data :
4949 (struct ip *)(m->m_data + e_hlen);
4950 /* ip_hl is in 32-bit words; << 1 converts it to 16-bit words */
4951 ip_hlen = (ip4->ip_hl << 1);
4952 ip = (caddr_t)ip4;
4953 break;
4954 case ETHERTYPE_IPV6:
4955 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4956 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4957 (struct ip6_hdr *)m->m_next->m_data :
4958 (struct ip6_hdr *)(m->m_data + e_hlen);
4959 /* XXX cannot support offload with IPv6 extensions */
4960 ip_hlen = (sizeof(struct ip6_hdr) >> 1);
4961 ip = (caddr_t)ip6;
4962 break;
4963 default:
4964 /* We can't offload in this case... */
4965 /* XXX error stat ??? */
4966 return (0);
4967 }
4968
4969 hlen = (e_hlen >> 1);
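    /* hlen accumulates the header length in 16-bit words; it is converted
     * back to bytes in the return statement at the end of this function */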
4970
4971 /* note that rest of global_data is indirectly zeroed here */
4972 if (m->m_flags & M_VLANTAG) {
4973 pbd->global_data =
4974 htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
4975 } else {
4976 pbd->global_data = htole16(hlen);
4977 }
4978
4979 pbd->ip_hlen_w = ip_hlen;
4980
4981 hlen += pbd->ip_hlen_w;
4982
4983 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4984
4985 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4986 CSUM_TSO |
4987 CSUM_TCP_IPV6)) {
4988 th = (struct tcphdr *)(ip + (ip_hlen << 1));
4989 /* th_off is in 32-bit words; << 1 converts it to 16-bit words */
4990 hlen += (uint16_t)(th->th_off << 1);
4991 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4992 CSUM_UDP_IPV6)) {
4993 uh = (struct udphdr *)(ip + (ip_hlen << 1));
4994 hlen += (sizeof(struct udphdr) / 2);
4995 } else {
4996 /* valid case as only CSUM_IP was set */
4997 return (0);
4998 }
4999
5000 pbd->total_hlen_w = htole16(hlen);
5001
5002 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5003 CSUM_TSO |
5004 CSUM_TCP_IPV6)) {
5005 fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
5006 pbd->tcp_pseudo_csum = ntohs(th->th_sum);
5007 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5008 CSUM_UDP_IPV6)) {
5009 fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5010
5011 /*
5012 * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
5013 * checksums and does not know anything about the UDP header and where
5014 * the checksum field is located. It only knows about TCP. Therefore
5015 * we "lie" to the hardware for outgoing UDP packets w/ checksum
5016 * offload. Since the checksum field offset for TCP is 16 bytes and
5017 * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
5018 * bytes less than the start of the UDP header. This allows the
5019 * hardware to write the checksum in the correct spot. But the
5020 * hardware will compute a checksum which includes the last 10 bytes
5021 * of the IP header. To correct this we tweak the stack computed
5022 * pseudo checksum by folding in the calculation of the inverse
5023 * checksum for those final 10 bytes of the IP header. This allows
5024 * the correct checksum to be computed by the hardware.
5025 */
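    /*
     * Worked example: with a 20-byte IPv4 header the UDP header starts at
     * ip + 20 and its checksum field is 6 bytes into it. Pointing the
     * hardware 10 bytes earlier (ip + 10) makes that field land at the TCP
     * checksum offset of 16 bytes, while tmp_csum below folds IP header
     * bytes 10..19 back out of the computed checksum.
     */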
5026
5027 /* set pointer 10 bytes before UDP header */
5028 tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5029
5030 /* calculate a pseudo header checksum over the first 10 bytes */
5031 tmp_csum = in_pseudo(*tmp_uh,
5032 *(tmp_uh + 1),
5033 *(uint16_t *)(tmp_uh + 2));
5034
5035 pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5036 }
5037
5038 return (hlen * 2); /* entire header length, number of bytes */
5039 }
5040
5041 static void
5042 bxe_set_pbd_lso_e2(struct mbuf *m,
5043 uint32_t *parsing_data)
5044 {
5045 *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5046 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5047 ETH_TX_PARSE_BD_E2_LSO_MSS);
5048
5049 /* XXX test for IPv6 with extension header... */
5050 }
5051
5052 static void
5053 bxe_set_pbd_lso(struct mbuf *m,
5054 struct eth_tx_parse_bd_e1x *pbd)
5055 {
5056 struct ether_vlan_header *eh = NULL;
5057 struct ip *ip = NULL;
5058 struct tcphdr *th = NULL;
5059 int e_hlen;
5060
5061 /* get the Ethernet header */
5062 eh = mtod(m, struct ether_vlan_header *);
5063
5064 /* handle VLAN encapsulation if present */
5065 e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5066 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5067
5068 /* get the IP and TCP header, with LSO entire header in first mbuf */
5069 /* XXX assuming IPv4 */
5070 ip = (struct ip *)(m->m_data + e_hlen);
5071 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5072
5073 pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5074 pbd->tcp_send_seq = ntohl(th->th_seq);
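    /* word 3 of the TCP header holds doff/flags/window; the shift and mask
     * below extract the low 8 flag bits (byte 13 of the header) */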
5075 pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5076
5077 #if 1
5078 /* XXX IPv4 */
5079 pbd->ip_id = ntohs(ip->ip_id);
5080 pbd->tcp_pseudo_csum =
5081 ntohs(in_pseudo(ip->ip_src.s_addr,
5082 ip->ip_dst.s_addr,
5083 htons(IPPROTO_TCP)));
5084 #else
5085 /* XXX IPv6 */
5086 pbd->tcp_pseudo_csum =
5087 ntohs(in_pseudo(&ip6->ip6_src,
5088 &ip6->ip6_dst,
5089 htons(IPPROTO_TCP)));
5090 #endif
5091
5092 pbd->global_data |=
5093 htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5094 }
5095
5096 /*
5097 * Encapsulates an mbuf cluster into the tx bd chain and makes the memory
5098 * visible to the controller.
5099 *
5100 * If an mbuf is submitted to this routine and cannot be given to the
5101 * controller (e.g. it has too many fragments) then the function may free
5102 * the mbuf and return to the caller.
5103 *
5104 * Returns:
5105 * 0 = Success, !0 = Failure
5106 * Note the side effect that an mbuf may be freed if it causes a problem.
5107 */
5108 static int
5109 bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5110 {
5111 bus_dma_segment_t segs[32];
5112 struct mbuf *m0;
5113 struct bxe_sw_tx_bd *tx_buf;
5114 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5115 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5116 /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5117 struct eth_tx_bd *tx_data_bd;
5118 struct eth_tx_bd *tx_total_pkt_size_bd;
5119 struct eth_tx_start_bd *tx_start_bd;
5120 uint16_t bd_prod, pkt_prod, total_pkt_size;
5121 uint8_t mac_type;
5122 int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5123 struct bxe_softc *sc;
5124 uint16_t tx_bd_avail;
5125 struct ether_vlan_header *eh;
5126 uint32_t pbd_e2_parsing_data = 0;
5127 uint8_t hlen = 0;
5128 int tmp_bd;
5129 int i;
5130
5131 sc = fp->sc;
5132
5133 M_ASSERTPKTHDR(*m_head);
5134
5135 m0 = *m_head;
5136 rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5137 tx_start_bd = NULL;
5138 tx_data_bd = NULL;
5139 tx_total_pkt_size_bd = NULL;
5140
5141 /* get the H/W pointer for packets and BDs */
5142 pkt_prod = fp->tx_pkt_prod;
5143 bd_prod = fp->tx_bd_prod;
5144
5145 mac_type = UNICAST_ADDRESS;
5146
5147 /* map the mbuf into the next open DMAable memory */
5148 tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5149 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5150 tx_buf->m_map, m0,
5151 segs, &nsegs, BUS_DMA_NOWAIT);
5152
5153 /* mapping errors */
5154 if(__predict_false(error != 0)) {
5155 fp->eth_q_stats.tx_dma_mapping_failure++;
5156 if (error == ENOMEM) {
5157 /* resource issue, try again later */
5158 rc = ENOMEM;
5159 } else if (error == EFBIG) {
5160 /* possibly recoverable with defragmentation */
5161 fp->eth_q_stats.mbuf_defrag_attempts++;
5162 m0 = m_defrag(*m_head, M_NOWAIT);
5163 if (m0 == NULL) {
5164 fp->eth_q_stats.mbuf_defrag_failures++;
5165 rc = ENOBUFS;
5166 } else {
5167 /* defrag successful, try mapping again */
5168 *m_head = m0;
5169 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5170 tx_buf->m_map, m0,
5171 segs, &nsegs, BUS_DMA_NOWAIT);
5172 if (error) {
5173 fp->eth_q_stats.tx_dma_mapping_failure++;
5174 rc = error;
5175 }
5176 }
5177 } else {
5178 /* unknown, unrecoverable mapping error */
5179 BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5180 bxe_dump_mbuf(sc, m0, FALSE);
5181 rc = error;
5182 }
5183
5184 goto bxe_tx_encap_continue;
5185 }
5186
5187 tx_bd_avail = bxe_tx_avail(sc, fp);
5188
5189 /* make sure there is enough room in the send queue */
5190 if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5191 /* Recoverable, try again later. */
5192 fp->eth_q_stats.tx_hw_queue_full++;
5193 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5194 rc = ENOMEM;
5195 goto bxe_tx_encap_continue;
5196 }
5197
5198 /* capture the current H/W TX chain high watermark */
5199 if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5200 (TX_BD_USABLE - tx_bd_avail))) {
5201 fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5202 }
5203
5204 /* make sure it fits in the packet window */
5205 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5206 /*
5207 * The mbuf may be too big for the controller to handle. If the frame
5208 * is a TSO frame we'll need to do an additional check.
5209 */
5210 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5211 if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5212 goto bxe_tx_encap_continue; /* OK to send */
5213 } else {
5214 fp->eth_q_stats.tx_window_violation_tso++;
5215 }
5216 } else {
5217 fp->eth_q_stats.tx_window_violation_std++;
5218 }
5219
5220 /* lets try to defragment this mbuf and remap it */
5221 fp->eth_q_stats.mbuf_defrag_attempts++;
5222 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5223
5224 m0 = m_defrag(*m_head, M_NOWAIT);
5225 if (m0 == NULL) {
5226 fp->eth_q_stats.mbuf_defrag_failures++;
5227 /* Ugh, just drop the frame... :( */
5228 rc = ENOBUFS;
5229 } else {
5230 /* defrag successful, try mapping again */
5231 *m_head = m0;
5232 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5233 tx_buf->m_map, m0,
5234 segs, &nsegs, BUS_DMA_NOWAIT);
5235 if (error) {
5236 fp->eth_q_stats.tx_dma_mapping_failure++;
5237 /* No sense in trying to defrag/copy chain, drop it. :( */
5238 rc = error;
5239 } else {
5240 /* if the chain is still too long then drop it */
5241 if(m0->m_pkthdr.csum_flags & CSUM_TSO) {
5242 /*
5243 * in case TSO is enabled nsegs should be checked against
5244 * BXE_TSO_MAX_SEGMENTS
5245 */
5246 if (__predict_false(nsegs > BXE_TSO_MAX_SEGMENTS)) {
5247 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5248 fp->eth_q_stats.nsegs_path1_errors++;
5249 rc = ENODEV;
5250 }
5251 } else {
5252 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5253 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5254 fp->eth_q_stats.nsegs_path2_errors++;
5255 rc = ENODEV;
5256 }
5257 }
5258 }
5259 }
5260 }
5261
5262 bxe_tx_encap_continue:
5263
5264 /* Check for errors */
5265 if (rc) {
5266 if (rc == ENOMEM) {
5267 /* recoverable try again later */
5268 } else {
5269 fp->eth_q_stats.tx_soft_errors++;
5270 fp->eth_q_stats.mbuf_alloc_tx--;
5271 m_freem(*m_head);
5272 *m_head = NULL;
5273 }
5274
5275 return (rc);
5276 }
5277
5278 /* set flag according to packet type (UNICAST_ADDRESS is default) */
5279 if (m0->m_flags & M_BCAST) {
5280 mac_type = BROADCAST_ADDRESS;
5281 } else if (m0->m_flags & M_MCAST) {
5282 mac_type = MULTICAST_ADDRESS;
5283 }
5284
5285 /* store the mbuf into the mbuf ring */
5286 tx_buf->m = m0;
5287 tx_buf->first_bd = fp->tx_bd_prod;
5288 tx_buf->flags = 0;
5289
5290 /* prepare the first transmit (start) BD for the mbuf */
5291 tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5292
5293 BLOGD(sc, DBG_TX,
5294 "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5295 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5296
5297 tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5298 tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5299 tx_start_bd->nbytes = htole16(segs[0].ds_len);
5300 total_pkt_size += tx_start_bd->nbytes;
5301 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5302
5303 tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5304
5305 /* all frames have at least Start BD + Parsing BD */
5306 nbds = nsegs + 1;
5307 tx_start_bd->nbd = htole16(nbds);
5308
5309 if (m0->m_flags & M_VLANTAG) {
5310 tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5311 tx_start_bd->bd_flags.as_bitfield |=
5312 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5313 } else {
5314 /* vf tx, start bd must hold the ethertype for fw to enforce it */
5315 if (IS_VF(sc)) {
5316 /* map ethernet header to find type and header length */
5317 eh = mtod(m0, struct ether_vlan_header *);
5318 tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5319 } else {
5320 /* used by FW for packet accounting */
5321 tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5322 }
5323 }
5324
5325 /*
5326 * add a parsing BD from the chain. The parsing BD is always added
5327 * though it is only used for TSO and chksum
5328 */
5329 bd_prod = TX_BD_NEXT(bd_prod);
5330
5331 if (m0->m_pkthdr.csum_flags) {
5332 if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5333 fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5334 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5335 }
5336
5337 if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5338 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5339 ETH_TX_BD_FLAGS_L4_CSUM);
5340 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5341 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5342 ETH_TX_BD_FLAGS_IS_UDP |
5343 ETH_TX_BD_FLAGS_L4_CSUM);
5344 } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5345 (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5346 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5347 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5348 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5349 ETH_TX_BD_FLAGS_IS_UDP);
5350 }
5351 }
5352
5353 if (!CHIP_IS_E1x(sc)) {
5354 pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5355 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5356
5357 if (m0->m_pkthdr.csum_flags) {
5358 hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5359 }
5360
5361 SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5362 mac_type);
5363 } else {
5364 uint16_t global_data = 0;
5365
5366 pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5367 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5368
5369 if (m0->m_pkthdr.csum_flags) {
5370 hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5371 }
5372
5373 SET_FLAG(global_data,
5374 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5375 pbd_e1x->global_data |= htole16(global_data);
5376 }
5377
5378 /* setup the parsing BD with TSO specific info */
5379 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5380 fp->eth_q_stats.tx_ofld_frames_lso++;
5381 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5382
5383 if (__predict_false(tx_start_bd->nbytes > hlen)) {
5384 fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5385
5386 /* split the first BD into header/data making the fw job easy */
5387 nbds++;
5388 tx_start_bd->nbd = htole16(nbds);
5389 tx_start_bd->nbytes = htole16(hlen);
5390
5391 bd_prod = TX_BD_NEXT(bd_prod);
5392
5393 /* new transmit BD after the tx_parse_bd */
5394 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5395 tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5396 tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5397 tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen);
5398 if (tx_total_pkt_size_bd == NULL) {
5399 tx_total_pkt_size_bd = tx_data_bd;
5400 }
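/*
 * Illustrative numbers only (not taken from the source): if the first DMA
 * segment covered 4096 bytes and the parsed header length (hlen) were 66,
 * the start BD would be trimmed to the 66 header bytes and the new data BD
 * would map the remaining 4030 bytes at (ds_addr + 66), matching the
 * "making the fw job easy" note above.
 */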
5401
5402 BLOGD(sc, DBG_TX,
5403 "TSO split header size is %d (%x:%x) nbds %d\n",
5404 le16toh(tx_start_bd->nbytes),
5405 le32toh(tx_start_bd->addr_hi),
5406 le32toh(tx_start_bd->addr_lo),
5407 nbds);
5408 }
5409
5410 if (!CHIP_IS_E1x(sc)) {
5411 bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5412 } else {
5413 bxe_set_pbd_lso(m0, pbd_e1x);
5414 }
5415 }
5416
5417 if (pbd_e2_parsing_data) {
5418 pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5419 }
5420
5421 /* prepare remaining BDs, start tx bd contains first seg/frag */
5422 for (i = 1; i < nsegs ; i++) {
5423 bd_prod = TX_BD_NEXT(bd_prod);
5424 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5425 tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5426 tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5427 tx_data_bd->nbytes = htole16(segs[i].ds_len);
5428 if (tx_total_pkt_size_bd == NULL) {
5429 tx_total_pkt_size_bd = tx_data_bd;
5430 }
5431 total_pkt_size += tx_data_bd->nbytes;
5432 }
5433
5434 BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5435
5436 if (tx_total_pkt_size_bd != NULL) {
5437 tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5438 }
5439
5440 if (__predict_false(sc->debug & DBG_TX)) {
5441 tmp_bd = tx_buf->first_bd;
5442 for (i = 0; i < nbds; i++)
5443 {
5444 if (i == 0) {
5445 BLOGD(sc, DBG_TX,
5446 "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5447 "bd_flags=0x%x hdr_nbds=%d\n",
5448 tx_start_bd,
5449 tmp_bd,
5450 le16toh(tx_start_bd->nbd),
5451 le16toh(tx_start_bd->vlan_or_ethertype),
5452 tx_start_bd->bd_flags.as_bitfield,
5453 (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5454 } else if (i == 1) {
5455 if (pbd_e1x) {
5456 BLOGD(sc, DBG_TX,
5457 "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5458 "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5459 "tcp_seq=%u total_hlen_w=%u\n",
5460 pbd_e1x,
5461 tmp_bd,
5462 pbd_e1x->global_data,
5463 pbd_e1x->ip_hlen_w,
5464 pbd_e1x->ip_id,
5465 pbd_e1x->lso_mss,
5466 pbd_e1x->tcp_flags,
5467 pbd_e1x->tcp_pseudo_csum,
5468 pbd_e1x->tcp_send_seq,
5469 le16toh(pbd_e1x->total_hlen_w));
5470 } else { /* if (pbd_e2) */
5471 BLOGD(sc, DBG_TX,
5472 "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5473 "src=%02x:%02x:%02x parsing_data=0x%x\n",
5474 pbd_e2,
5475 tmp_bd,
5476 pbd_e2->data.mac_addr.dst_hi,
5477 pbd_e2->data.mac_addr.dst_mid,
5478 pbd_e2->data.mac_addr.dst_lo,
5479 pbd_e2->data.mac_addr.src_hi,
5480 pbd_e2->data.mac_addr.src_mid,
5481 pbd_e2->data.mac_addr.src_lo,
5482 pbd_e2->parsing_data);
5483 }
5484 }
5485
5486 if (i != 1) { /* skip the parsing BD as it doesn't hold data */
5487 tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5488 BLOGD(sc, DBG_TX,
5489 "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5490 tx_data_bd,
5491 tmp_bd,
5492 le16toh(tx_data_bd->nbytes),
5493 le32toh(tx_data_bd->addr_hi),
5494 le32toh(tx_data_bd->addr_lo));
5495 }
5496
5497 tmp_bd = TX_BD_NEXT(tmp_bd);
5498 }
5499 }
5500
5501 BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5502
5503 /* update TX BD producer index value for next TX */
5504 bd_prod = TX_BD_NEXT(bd_prod);
5505
5506 /*
5507 * If the chain of tx_bd's describing this frame is adjacent to or spans
5508 * an eth_tx_next_bd element then we need to increment the nbds value.
5509 */
5510 if (TX_BD_IDX(bd_prod) < nbds) {
5511 nbds++;
5512 }
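/*
 * Sketch of the reasoning (based on the chain layout implied by the comment
 * above): the last entry of each BD page is an eth_tx_next_bd link rather
 * than a usable BD, so if the in-page index of the advanced producer is
 * smaller than nbds the frame's BDs wrapped across a page boundary and
 * consumed a link entry, which must also be reflected in nbd.
 */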
5513
5514 /* don't allow reordering of writes for nbd and packets */
5515 mb();
5516
5517 fp->tx_db.data.prod += nbds;
5518
5519 /* producer points to the next free tx_bd at this point */
5520 fp->tx_pkt_prod++;
5521 fp->tx_bd_prod = bd_prod;
5522
5523 DOORBELL(sc, fp->index, fp->tx_db.raw);
5524
5525 fp->eth_q_stats.tx_pkts++;
5526
5527 /* Prevent speculative reads from getting ahead of the status block. */
5528 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5529 0, 0, BUS_SPACE_BARRIER_READ);
5530
5531 /* Prevent speculative reads from getting ahead of the doorbell. */
5532 bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5533 0, 0, BUS_SPACE_BARRIER_READ);
5534
5535 return (0);
5536 }
5537
5538 static void
5539 bxe_tx_start_locked(struct bxe_softc *sc,
5540 if_t ifp,
5541 struct bxe_fastpath *fp)
5542 {
5543 struct mbuf *m = NULL;
5544 int tx_count = 0;
5545 uint16_t tx_bd_avail;
5546
5547 BXE_FP_TX_LOCK_ASSERT(fp);
5548
5549 /* keep adding entries while there are frames to send */
5550 while (!if_sendq_empty(ifp)) {
5551
5552 /*
5553 * check for any frames to send
5554 * dequeue can still be NULL even if queue is not empty
5555 */
5556 m = if_dequeue(ifp);
5557 if (__predict_false(m == NULL)) {
5558 break;
5559 }
5560
5561 /* the mbuf now belongs to us */
5562 fp->eth_q_stats.mbuf_alloc_tx++;
5563
5564 /*
5565 * Put the frame into the transmit ring. If we don't have room,
5566 * place the mbuf back at the head of the TX queue, set the
5567 * OACTIVE flag, and wait for the NIC to drain the chain.
5568 */
5569 if (__predict_false(bxe_tx_encap(fp, &m))) {
5570 fp->eth_q_stats.tx_encap_failures++;
5571 if (m != NULL) {
5572 /* mark the TX queue as full and return the frame */
5573 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5574 if_sendq_prepend(ifp, m);
5575 fp->eth_q_stats.mbuf_alloc_tx--;
5576 fp->eth_q_stats.tx_queue_xoff++;
5577 }
5578
5579 /* stop looking for more work */
5580 break;
5581 }
5582
5583 /* the frame was enqueued successfully */
5584 tx_count++;
5585
5586 /* send a copy of the frame to any BPF listeners. */
5587 if_etherbpfmtap(ifp, m);
5588
5589 tx_bd_avail = bxe_tx_avail(sc, fp);
5590
5591 /* handle any completions if we're running low */
5592 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5593 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5594 bxe_txeof(sc, fp);
5595 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5596 break;
5597 }
5598 }
5599 }
5600
5601 /* all TX packets were dequeued and/or the tx ring is full */
5602 if (tx_count > 0) {
5603 /* reset the TX watchdog timeout timer */
5604 fp->watchdog_timer = BXE_TX_TIMEOUT;
5605 }
5606 }
5607
5608 /* Legacy (non-RSS) dispatch routine */
5609 static void
5610 bxe_tx_start(if_t ifp)
5611 {
5612 struct bxe_softc *sc;
5613 struct bxe_fastpath *fp;
5614
5615 sc = if_getsoftc(ifp);
5616
5617 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5618 BLOGW(sc, "Interface not running, ignoring transmit request\n");
5619 return;
5620 }
5621
5622 if (!sc->link_vars.link_up) {
5623 BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5624 return;
5625 }
5626
5627 fp = &sc->fp[0];
5628
5629 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5630 fp->eth_q_stats.tx_queue_full_return++;
5631 return;
5632 }
5633
5634 BXE_FP_TX_LOCK(fp);
5635 bxe_tx_start_locked(sc, ifp, fp);
5636 BXE_FP_TX_UNLOCK(fp);
5637 }
5638
5639 static int
5640 bxe_tx_mq_start_locked(struct bxe_softc *sc,
5641 if_t ifp,
5642 struct bxe_fastpath *fp,
5643 struct mbuf *m)
5644 {
5645 struct buf_ring *tx_br = fp->tx_br;
5646 struct mbuf *next;
5647 int depth, rc, tx_count;
5648 uint16_t tx_bd_avail;
5649
5650 rc = tx_count = 0;
5651
5652 BXE_FP_TX_LOCK_ASSERT(fp);
5653
5654 if (sc->state != BXE_STATE_OPEN) {
5655 fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5656 return ENETDOWN;
5657 }
5658
5659 if (!tx_br) {
5660 BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
5661 return (EINVAL);
5662 }
5663
5664 if (m != NULL) {
5665 rc = drbr_enqueue(ifp, tx_br, m);
5666 if (rc != 0) {
5667 fp->eth_q_stats.tx_soft_errors++;
5668 goto bxe_tx_mq_start_locked_exit;
5669 }
5670 }
5671
5672 if (!sc->link_vars.link_up || !(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5673 fp->eth_q_stats.tx_request_link_down_failures++;
5674 goto bxe_tx_mq_start_locked_exit;
5675 }
5676
5677 /* fetch the depth of the driver queue */
5678 depth = drbr_inuse_drv(ifp, tx_br);
5679 if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5680 fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5681 }
5682
5683 /* keep adding entries while there are frames to send */
5684 while ((next = drbr_peek(ifp, tx_br)) != NULL) {
5685 /* handle any completions if we're running low */
5686 tx_bd_avail = bxe_tx_avail(sc, fp);
5687 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5688 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5689 bxe_txeof(sc, fp);
5690 tx_bd_avail = bxe_tx_avail(sc, fp);
5691 if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) {
5692 fp->eth_q_stats.bd_avail_too_less_failures++;
5693 m_freem(next);
5694 drbr_advance(ifp, tx_br);
5695 rc = ENOBUFS;
5696 break;
5697 }
5698 }
5699
5700 /* the mbuf now belongs to us */
5701 fp->eth_q_stats.mbuf_alloc_tx++;
5702
5703 /*
5704 * Put the frame into the transmit ring. If we don't have room,
5705 * place the mbuf back at the head of the TX queue, set the
5706 * OACTIVE flag, and wait for the NIC to drain the chain.
5707 */
5708 rc = bxe_tx_encap(fp, &next);
5709 if (__predict_false(rc != 0)) {
5710 fp->eth_q_stats.tx_encap_failures++;
5711 if (next != NULL) {
5712 /* mark the TX queue as full and save the frame */
5713 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5714 drbr_putback(ifp, tx_br, next);
5715 fp->eth_q_stats.mbuf_alloc_tx--;
5716 fp->eth_q_stats.tx_frames_deferred++;
5717 } else
5718 drbr_advance(ifp, tx_br);
5719
5720 /* stop looking for more work */
5721 break;
5722 }
5723
5724 /* the transmit frame was enqueued successfully */
5725 tx_count++;
5726
5727 /* send a copy of the frame to any BPF listeners */
5728 if_etherbpfmtap(ifp, next);
5729
5730 drbr_advance(ifp, tx_br);
5731 }
5732
5733 /* all TX packets were dequeued and/or the tx ring is full */
5734 if (tx_count > 0) {
5735 /* reset the TX watchdog timeout timer */
5736 fp->watchdog_timer = BXE_TX_TIMEOUT;
5737 }
5738
5739 bxe_tx_mq_start_locked_exit:
5740 /* If we didn't drain the drbr, enqueue a task in the future to do it. */
5741 if (!drbr_empty(ifp, tx_br)) {
5742 fp->eth_q_stats.tx_mq_not_empty++;
5743 taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
5744 }
5745
5746 return (rc);
5747 }
5748
5749 static void
5750 bxe_tx_mq_start_deferred(void *arg,
5751 int pending)
5752 {
5753 struct bxe_fastpath *fp = (struct bxe_fastpath *)arg;
5754 struct bxe_softc *sc = fp->sc;
5755 if_t ifp = sc->ifp;
5756
5757 BXE_FP_TX_LOCK(fp);
5758 bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
5759 BXE_FP_TX_UNLOCK(fp);
5760 }
5761
5762 /* Multiqueue (TSS) dispatch routine. */
5763 static int
5764 bxe_tx_mq_start(struct ifnet *ifp,
5765 struct mbuf *m)
5766 {
5767 struct bxe_softc *sc = if_getsoftc(ifp);
5768 struct bxe_fastpath *fp;
5769 int fp_index, rc;
5770
5771 fp_index = 0; /* default is the first queue */
5772
5773 /* check if flowid is set */
5774
5775 if (BXE_VALID_FLOWID(m))
5776 fp_index = (m->m_pkthdr.flowid % sc->num_queues);
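/*
 * For example (illustrative values): with num_queues = 4, a flowid of 13
 * selects fp[1]; every packet of that flow hashes to the same ring, which
 * preserves per-flow ordering across the TSS queues.
 */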
5777
5778 fp = &sc->fp[fp_index];
5779
5780 if (sc->state != BXE_STATE_OPEN) {
5781 fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5782 return ENETDOWN;
5783 }
5784
5785 if (BXE_FP_TX_TRYLOCK(fp)) {
5786 rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
5787 BXE_FP_TX_UNLOCK(fp);
5788 } else {
5789 rc = drbr_enqueue(ifp, fp->tx_br, m);
5790 taskqueue_enqueue(fp->tq, &fp->tx_task);
5791 }
5792
5793 return (rc);
5794 }
5795
5796 static void
5797 bxe_mq_flush(struct ifnet *ifp)
5798 {
5799 struct bxe_softc *sc = if_getsoftc(ifp);
5800 struct bxe_fastpath *fp;
5801 struct mbuf *m;
5802 int i;
5803
5804 for (i = 0; i < sc->num_queues; i++) {
5805 fp = &sc->fp[i];
5806
5807 if (fp->state != BXE_FP_STATE_IRQ) {
5808 BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
5809 fp->index, fp->state);
5810 continue;
5811 }
5812
5813 if (fp->tx_br != NULL) {
5814 BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
5815 BXE_FP_TX_LOCK(fp);
5816 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
5817 m_freem(m);
5818 }
5819 BXE_FP_TX_UNLOCK(fp);
5820 }
5821 }
5822
5823 if_qflush(ifp);
5824 }
5825
5826 static uint16_t
5827 bxe_cid_ilt_lines(struct bxe_softc *sc)
5828 {
5829 if (IS_SRIOV(sc)) {
5830 return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
5831 }
5832 return (L2_ILT_LINES(sc));
5833 }
5834
5835 static void
5836 bxe_ilt_set_info(struct bxe_softc *sc)
5837 {
5838 struct ilt_client_info *ilt_client;
5839 struct ecore_ilt *ilt = sc->ilt;
5840 uint16_t line = 0;
5841
5842 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
5843 BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
5844
5845 /* CDU */
5846 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5847 ilt_client->client_num = ILT_CLIENT_CDU;
5848 ilt_client->page_size = CDU_ILT_PAGE_SZ;
5849 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5850 ilt_client->start = line;
5851 line += bxe_cid_ilt_lines(sc);
5852
5853 if (CNIC_SUPPORT(sc)) {
5854 line += CNIC_ILT_LINES;
5855 }
5856
5857 ilt_client->end = (line - 1);
5858
5859 BLOGD(sc, DBG_LOAD,
5860 "ilt client[CDU]: start %d, end %d, "
5861 "psz 0x%x, flags 0x%x, hw psz %d\n",
5862 ilt_client->start, ilt_client->end,
5863 ilt_client->page_size,
5864 ilt_client->flags,
5865 ilog2(ilt_client->page_size >> 12));
5866
5867 /* QM */
5868 if (QM_INIT(sc->qm_cid_count)) {
5869 ilt_client = &ilt->clients[ILT_CLIENT_QM];
5870 ilt_client->client_num = ILT_CLIENT_QM;
5871 ilt_client->page_size = QM_ILT_PAGE_SZ;
5872 ilt_client->flags = 0;
5873 ilt_client->start = line;
5874
5875 /* 4 bytes for each cid */
5876 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5877 QM_ILT_PAGE_SZ);
5878
5879 ilt_client->end = (line - 1);
5880
5881 BLOGD(sc, DBG_LOAD,
5882 "ilt client[QM]: start %d, end %d, "
5883 "psz 0x%x, flags 0x%x, hw psz %d\n",
5884 ilt_client->start, ilt_client->end,
5885 ilt_client->page_size, ilt_client->flags,
5886 ilog2(ilt_client->page_size >> 12));
5887 }
5888
5889 if (CNIC_SUPPORT(sc)) {
5890 /* SRC */
5891 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5892 ilt_client->client_num = ILT_CLIENT_SRC;
5893 ilt_client->page_size = SRC_ILT_PAGE_SZ;
5894 ilt_client->flags = 0;
5895 ilt_client->start = line;
5896 line += SRC_ILT_LINES;
5897 ilt_client->end = (line - 1);
5898
5899 BLOGD(sc, DBG_LOAD,
5900 "ilt client[SRC]: start %d, end %d, "
5901 "psz 0x%x, flags 0x%x, hw psz %d\n",
5902 ilt_client->start, ilt_client->end,
5903 ilt_client->page_size, ilt_client->flags,
5904 ilog2(ilt_client->page_size >> 12));
5905
5906 /* TM */
5907 ilt_client = &ilt->clients[ILT_CLIENT_TM];
5908 ilt_client->client_num = ILT_CLIENT_TM;
5909 ilt_client->page_size = TM_ILT_PAGE_SZ;
5910 ilt_client->flags = 0;
5911 ilt_client->start = line;
5912 line += TM_ILT_LINES;
5913 ilt_client->end = (line - 1);
5914
5915 BLOGD(sc, DBG_LOAD,
5916 "ilt client[TM]: start %d, end %d, "
5917 "psz 0x%x, flags 0x%x, hw psz %d\n",
5918 ilt_client->start, ilt_client->end,
5919 ilt_client->page_size, ilt_client->flags,
5920 ilog2(ilt_client->page_size >> 12));
5921 }
5922
5923 KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
5924 }
5925
5926 static void
5927 bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
5928 {
5929 int i;
5930 uint32_t rx_buf_size;
5931
5932 rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
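/*
 * Worked example with assumed sizes (MCLBYTES = 2KB and MJUMPAGESIZE = 4KB
 * on most platforms): a 1500-byte MTU plus alignment padding and Ethernet
 * overhead stays under 2KB, so a standard cluster is used; a 9000-byte MTU
 * exceeds 2 * MJUMPAGESIZE, so the loop below falls back to 2KB clusters
 * and the frame is spread across multiple RX BDs.
 */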
5933
5934 for (i = 0; i < sc->num_queues; i++) {
5935 if (rx_buf_size <= MCLBYTES) {
5936 sc->fp[i].rx_buf_size = rx_buf_size;
5937 sc->fp[i].mbuf_alloc_size = MCLBYTES;
5938 } else if (rx_buf_size <= MJUMPAGESIZE) {
5939 sc->fp[i].rx_buf_size = rx_buf_size;
5940 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5941 } else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)) {
5942 sc->fp[i].rx_buf_size = MCLBYTES;
5943 sc->fp[i].mbuf_alloc_size = MCLBYTES;
5944 } else if (rx_buf_size <= (2 * MJUMPAGESIZE)) {
5945 sc->fp[i].rx_buf_size = MJUMPAGESIZE;
5946 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5947 } else {
5948 sc->fp[i].rx_buf_size = MCLBYTES;
5949 sc->fp[i].mbuf_alloc_size = MCLBYTES;
5950 }
5951 }
5952 }
5953
5954 static int
5955 bxe_alloc_ilt_mem(struct bxe_softc *sc)
5956 {
5957 int rc = 0;
5958
5959 if ((sc->ilt =
5960 (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
5961 M_BXE_ILT,
5962 (M_NOWAIT | M_ZERO))) == NULL) {
5963 rc = 1;
5964 }
5965
5966 return (rc);
5967 }
5968
5969 static int
5970 bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
5971 {
5972 int rc = 0;
5973
5974 if ((sc->ilt->lines =
5975 (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
5976 M_BXE_ILT,
5977 (M_NOWAIT | M_ZERO))) == NULL) {
5978 rc = 1;
5979 }
5980
5981 return (rc);
5982 }
5983
5984 static void
5985 bxe_free_ilt_mem(struct bxe_softc *sc)
5986 {
5987 if (sc->ilt != NULL) {
5988 free(sc->ilt, M_BXE_ILT);
5989 sc->ilt = NULL;
5990 }
5991 }
5992
5993 static void
5994 bxe_free_ilt_lines_mem(struct bxe_softc *sc)
5995 {
5996 if (sc->ilt->lines != NULL) {
5997 free(sc->ilt->lines, M_BXE_ILT);
5998 sc->ilt->lines = NULL;
5999 }
6000 }
6001
6002 static void
6003 bxe_free_mem(struct bxe_softc *sc)
6004 {
6005 int i;
6006
6007 for (i = 0; i < L2_ILT_LINES(sc); i++) {
6008 bxe_dma_free(sc, &sc->context[i].vcxt_dma);
6009 sc->context[i].vcxt = NULL;
6010 sc->context[i].size = 0;
6011 }
6012
6013 ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
6014
6015 bxe_free_ilt_lines_mem(sc);
6016
6017 }
6018
6019 static int
6020 bxe_alloc_mem(struct bxe_softc *sc)
6021 {
6022
6023 int context_size;
6024 int allocated;
6025 int i;
6026
6027 /*
6028 * Allocate memory for CDU context:
6029 * This memory is allocated separately and not in the generic ILT
6030 * functions because CDU differs in few aspects:
6031 * 1. There can be multiple entities allocating memory for context -
6032 * regular L2, CNIC, and SRIOV drivers. Each separately controls
6033 * its own ILT lines.
6034 * 2. Since CDU page-size is not a single 4KB page (which is the case
6035 * for the other ILT clients), to be efficient we want to support
6036 * allocation of sub-page-size in the last entry.
6037 * 3. Context pointers are used by the driver to pass to FW / update
6038 * the context (for the other ILT clients the pointers are used just to
6039 * free the memory during unload).
6040 */
6041 context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6042 for (i = 0, allocated = 0; allocated < context_size; i++) {
6043 sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6044 (context_size - allocated));
6045
6046 if (bxe_dma_alloc(sc, sc->context[i].size,
6047 &sc->context[i].vcxt_dma,
6048 "cdu context") != 0) {
6049 bxe_free_mem(sc);
6050 return (-1);
6051 }
6052
6053 sc->context[i].vcxt =
6054 (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6055
6056 allocated += sc->context[i].size;
6057 }
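/*
 * Illustrative chunking (the numbers are assumptions, not driver
 * constants): if context_size were 40KB and CDU_ILT_PAGE_SZ were 16KB, the
 * loop above would allocate three DMA buffers of 16KB, 16KB and 8KB,
 * demonstrating the sub-page-size allocation for the last entry mentioned
 * in the comment above.
 */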
6058
6059 bxe_alloc_ilt_lines_mem(sc);
6060
6061 BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6062 sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6063 {
6064 for (i = 0; i < 4; i++) {
6065 BLOGD(sc, DBG_LOAD,
6066 "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6067 i,
6068 sc->ilt->clients[i].page_size,
6069 sc->ilt->clients[i].start,
6070 sc->ilt->clients[i].end,
6071 sc->ilt->clients[i].client_num,
6072 sc->ilt->clients[i].flags);
6073 }
6074 }
6075 if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6076 BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6077 bxe_free_mem(sc);
6078 return (-1);
6079 }
6080
6081 return (0);
6082 }
6083
6084 static void
6085 bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6086 {
6087 struct bxe_softc *sc;
6088 int i;
6089
6090 sc = fp->sc;
6091
6092 if (fp->rx_mbuf_tag == NULL) {
6093 return;
6094 }
6095
6096 /* free all mbufs and unload all maps */
6097 for (i = 0; i < RX_BD_TOTAL; i++) {
6098 if (fp->rx_mbuf_chain[i].m_map != NULL) {
6099 bus_dmamap_sync(fp->rx_mbuf_tag,
6100 fp->rx_mbuf_chain[i].m_map,
6101 BUS_DMASYNC_POSTREAD);
6102 bus_dmamap_unload(fp->rx_mbuf_tag,
6103 fp->rx_mbuf_chain[i].m_map);
6104 }
6105
6106 if (fp->rx_mbuf_chain[i].m != NULL) {
6107 m_freem(fp->rx_mbuf_chain[i].m);
6108 fp->rx_mbuf_chain[i].m = NULL;
6109 fp->eth_q_stats.mbuf_alloc_rx--;
6110 }
6111 }
6112 }
6113
6114 static void
6115 bxe_free_tpa_pool(struct bxe_fastpath *fp)
6116 {
6117 struct bxe_softc *sc;
6118 int i, max_agg_queues;
6119
6120 sc = fp->sc;
6121
6122 if (fp->rx_mbuf_tag == NULL) {
6123 return;
6124 }
6125
6126 max_agg_queues = MAX_AGG_QS(sc);
6127
6128 /* release all mbufs and unload all DMA maps in the TPA pool */
6129 for (i = 0; i < max_agg_queues; i++) {
6130 if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6131 bus_dmamap_sync(fp->rx_mbuf_tag,
6132 fp->rx_tpa_info[i].bd.m_map,
6133 BUS_DMASYNC_POSTREAD);
6134 bus_dmamap_unload(fp->rx_mbuf_tag,
6135 fp->rx_tpa_info[i].bd.m_map);
6136 }
6137
6138 if (fp->rx_tpa_info[i].bd.m != NULL) {
6139 m_freem(fp->rx_tpa_info[i].bd.m);
6140 fp->rx_tpa_info[i].bd.m = NULL;
6141 fp->eth_q_stats.mbuf_alloc_tpa--;
6142 }
6143 }
6144 }
6145
6146 static void
6147 bxe_free_sge_chain(struct bxe_fastpath *fp)
6148 {
6149 struct bxe_softc *sc;
6150 int i;
6151
6152 sc = fp->sc;
6153
6154 if (fp->rx_sge_mbuf_tag == NULL) {
6155 return;
6156 }
6157
6158 /* free all mbufs and unload all maps */
6159 for (i = 0; i < RX_SGE_TOTAL; i++) {
6160 if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6161 bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6162 fp->rx_sge_mbuf_chain[i].m_map,
6163 BUS_DMASYNC_POSTREAD);
6164 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6165 fp->rx_sge_mbuf_chain[i].m_map);
6166 }
6167
6168 if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6169 m_freem(fp->rx_sge_mbuf_chain[i].m);
6170 fp->rx_sge_mbuf_chain[i].m = NULL;
6171 fp->eth_q_stats.mbuf_alloc_sge--;
6172 }
6173 }
6174 }
6175
6176 static void
6177 bxe_free_fp_buffers(struct bxe_softc *sc)
6178 {
6179 struct bxe_fastpath *fp;
6180 int i;
6181
6182 for (i = 0; i < sc->num_queues; i++) {
6183 fp = &sc->fp[i];
6184
6185 if (fp->tx_br != NULL) {
6186 /* just in case bxe_mq_flush() wasn't called */
6187 if (mtx_initialized(&fp->tx_mtx)) {
6188 struct mbuf *m;
6189
6190 BXE_FP_TX_LOCK(fp);
6191 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6192 m_freem(m);
6193 BXE_FP_TX_UNLOCK(fp);
6194 }
6195 }
6196
6197 /* free all RX buffers */
6198 bxe_free_rx_bd_chain(fp);
6199 bxe_free_tpa_pool(fp);
6200 bxe_free_sge_chain(fp);
6201
6202 if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6203 BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6204 fp->eth_q_stats.mbuf_alloc_rx);
6205 }
6206
6207 if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6208 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6209 fp->eth_q_stats.mbuf_alloc_sge);
6210 }
6211
6212 if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6213 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6214 fp->eth_q_stats.mbuf_alloc_tpa);
6215 }
6216
6217 if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6218 BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6219 fp->eth_q_stats.mbuf_alloc_tx);
6220 }
6221
6222 /* XXX verify all mbufs were reclaimed */
6223 }
6224 }
6225
6226 static int
6227 bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6228 uint16_t prev_index,
6229 uint16_t index)
6230 {
6231 struct bxe_sw_rx_bd *rx_buf;
6232 struct eth_rx_bd *rx_bd;
6233 bus_dma_segment_t segs[1];
6234 bus_dmamap_t map;
6235 struct mbuf *m;
6236 int nsegs, rc;
6237
6238 rc = 0;
6239
6240 /* allocate the new RX BD mbuf */
6241 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6242 if (__predict_false(m == NULL)) {
6243 fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6244 return (ENOBUFS);
6245 }
6246
6247 fp->eth_q_stats.mbuf_alloc_rx++;
6248
6249 /* initialize the mbuf buffer length */
6250 m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6251
6252 /* map the mbuf into non-paged pool */
6253 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6254 fp->rx_mbuf_spare_map,
6255 m, segs, &nsegs, BUS_DMA_NOWAIT);
6256 if (__predict_false(rc != 0)) {
6257 fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6258 m_freem(m);
6259 fp->eth_q_stats.mbuf_alloc_rx--;
6260 return (rc);
6261 }
6262
6263 /* all mbufs must map to a single segment */
6264 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6265
6266 /* release any existing RX BD mbuf mappings */
6267
6268 if (prev_index != index) {
6269 rx_buf = &fp->rx_mbuf_chain[prev_index];
6270
6271 if (rx_buf->m_map != NULL) {
6272 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6273 BUS_DMASYNC_POSTREAD);
6274 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6275 }
6276
6277 /*
6278 * We only get here from bxe_rxeof() when the maximum number
6279 * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6280 * holds the mbuf in the prev_index so it's OK to NULL it out
6281 * here without concern of a memory leak.
6282 */
6283 fp->rx_mbuf_chain[prev_index].m = NULL;
6284 }
6285
6286 rx_buf = &fp->rx_mbuf_chain[index];
6287
6288 if (rx_buf->m_map != NULL) {
6289 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6290 BUS_DMASYNC_POSTREAD);
6291 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6292 }
6293
6294 /* save the mbuf and mapping info for a future packet */
6295 map = (prev_index != index) ?
6296 fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6297 rx_buf->m_map = fp->rx_mbuf_spare_map;
6298 fp->rx_mbuf_spare_map = map;
6299 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6300 BUS_DMASYNC_PREREAD);
6301 rx_buf->m = m;
6302
6303 rx_bd = &fp->rx_chain[index];
6304 rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6305 rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6306
6307 return (rc);
6308 }
6309
6310 static int
6311 bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6312 int queue)
6313 {
6314 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6315 bus_dma_segment_t segs[1];
6316 bus_dmamap_t map;
6317 struct mbuf *m;
6318 int nsegs;
6319 int rc = 0;
6320
6321 /* allocate the new TPA mbuf */
6322 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6323 if (__predict_false(m == NULL)) {
6324 fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6325 return (ENOBUFS);
6326 }
6327
6328 fp->eth_q_stats.mbuf_alloc_tpa++;
6329
6330 /* initialize the mbuf buffer length */
6331 m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6332
6333 /* map the mbuf into non-paged pool */
6334 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6335 fp->rx_tpa_info_mbuf_spare_map,
6336 m, segs, &nsegs, BUS_DMA_NOWAIT);
6337 if (__predict_false(rc != 0)) {
6338 fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6339 m_free(m);
6340 fp->eth_q_stats.mbuf_alloc_tpa--;
6341 return (rc);
6342 }
6343
6344 /* all mbufs must map to a single segment */
6345 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6346
6347 /* release any existing TPA mbuf mapping */
6348 if (tpa_info->bd.m_map != NULL) {
6349 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6350 BUS_DMASYNC_POSTREAD);
6351 bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6352 }
6353
6354 /* save the mbuf and mapping info for the TPA mbuf */
6355 map = tpa_info->bd.m_map;
6356 tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6357 fp->rx_tpa_info_mbuf_spare_map = map;
6358 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6359 BUS_DMASYNC_PREREAD);
6360 tpa_info->bd.m = m;
6361 tpa_info->seg = segs[0];
6362
6363 return (rc);
6364 }
6365
6366 /*
6367 * Allocate an mbuf and assign it to the receive scatter gather chain. The
6368 * caller must take care to save a copy of the existing mbuf in the SG mbuf
6369 * chain.
6370 */
6371 static int
6372 bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6373 uint16_t index)
6374 {
6375 struct bxe_sw_rx_bd *sge_buf;
6376 struct eth_rx_sge *sge;
6377 bus_dma_segment_t segs[1];
6378 bus_dmamap_t map;
6379 struct mbuf *m;
6380 int nsegs;
6381 int rc = 0;
6382
6383 /* allocate a new SGE mbuf */
6384 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6385 if (__predict_false(m == NULL)) {
6386 fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6387 return (ENOMEM);
6388 }
6389
6390 fp->eth_q_stats.mbuf_alloc_sge++;
6391
6392 /* initialize the mbuf buffer length */
6393 m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6394
6395 /* map the SGE mbuf into non-paged pool */
6396 rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6397 fp->rx_sge_mbuf_spare_map,
6398 m, segs, &nsegs, BUS_DMA_NOWAIT);
6399 if (__predict_false(rc != 0)) {
6400 fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6401 m_freem(m);
6402 fp->eth_q_stats.mbuf_alloc_sge--;
6403 return (rc);
6404 }
6405
6406 /* all mbufs must map to a single segment */
6407 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6408
6409 sge_buf = &fp->rx_sge_mbuf_chain[index];
6410
6411 /* release any existing SGE mbuf mapping */
6412 if (sge_buf->m_map != NULL) {
6413 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6414 BUS_DMASYNC_POSTREAD);
6415 bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6416 }
6417
6418 /* save the mbuf and mapping info for a future packet */
6419 map = sge_buf->m_map;
6420 sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6421 fp->rx_sge_mbuf_spare_map = map;
6422 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6423 BUS_DMASYNC_PREREAD);
6424 sge_buf->m = m;
6425
6426 sge = &fp->rx_sge_chain[index];
6427 sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6428 sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6429
6430 return (rc);
6431 }
6432
6433 static __noinline int
6434 bxe_alloc_fp_buffers(struct bxe_softc *sc)
6435 {
6436 struct bxe_fastpath *fp;
6437 int i, j, rc = 0;
6438 int ring_prod, cqe_ring_prod;
6439 int max_agg_queues;
6440
6441 for (i = 0; i < sc->num_queues; i++) {
6442 fp = &sc->fp[i];
6443
6444 ring_prod = cqe_ring_prod = 0;
6445 fp->rx_bd_cons = 0;
6446 fp->rx_cq_cons = 0;
6447
6448 /* allocate buffers for the RX BDs in RX BD chain */
6449 for (j = 0; j < sc->max_rx_bufs; j++) {
6450 rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6451 if (rc != 0) {
6452 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6453 i, rc);
6454 goto bxe_alloc_fp_buffers_error;
6455 }
6456
6457 ring_prod = RX_BD_NEXT(ring_prod);
6458 cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6459 }
6460
6461 fp->rx_bd_prod = ring_prod;
6462 fp->rx_cq_prod = cqe_ring_prod;
6463 fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6464
6465 max_agg_queues = MAX_AGG_QS(sc);
6466
6467 fp->tpa_enable = TRUE;
6468
6469 /* fill the TPA pool */
6470 for (j = 0; j < max_agg_queues; j++) {
6471 rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6472 if (rc != 0) {
6473 BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6474 i, j);
6475 fp->tpa_enable = FALSE;
6476 goto bxe_alloc_fp_buffers_error;
6477 }
6478
6479 fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6480 }
6481
6482 if (fp->tpa_enable) {
6483 /* fill the RX SGE chain */
6484 ring_prod = 0;
6485 for (j = 0; j < RX_SGE_USABLE; j++) {
6486 rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6487 if (rc != 0) {
6488 BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6489 i, ring_prod);
6490 fp->tpa_enable = FALSE;
6491 ring_prod = 0;
6492 goto bxe_alloc_fp_buffers_error;
6493 }
6494
6495 ring_prod = RX_SGE_NEXT(ring_prod);
6496 }
6497
6498 fp->rx_sge_prod = ring_prod;
6499 }
6500 }
6501
6502 return (0);
6503
6504 bxe_alloc_fp_buffers_error:
6505
6506 /* unwind what was already allocated */
6507 bxe_free_rx_bd_chain(fp);
6508 bxe_free_tpa_pool(fp);
6509 bxe_free_sge_chain(fp);
6510
6511 return (ENOBUFS);
6512 }
6513
6514 static void
6515 bxe_free_fw_stats_mem(struct bxe_softc *sc)
6516 {
6517 bxe_dma_free(sc, &sc->fw_stats_dma);
6518
6519 sc->fw_stats_num = 0;
6520
6521 sc->fw_stats_req_size = 0;
6522 sc->fw_stats_req = NULL;
6523 sc->fw_stats_req_mapping = 0;
6524
6525 sc->fw_stats_data_size = 0;
6526 sc->fw_stats_data = NULL;
6527 sc->fw_stats_data_mapping = 0;
6528 }
6529
6530 static int
6531 bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6532 {
6533 uint8_t num_queue_stats;
6534 int num_groups;
6535
6536 /* number of queues for statistics is number of eth queues */
6537 num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6538
6539 /*
6540 * Total number of FW statistics requests =
6541 * 1 for port stats + 1 for PF stats + num of queues
6542 */
6543 sc->fw_stats_num = (2 + num_queue_stats);
6544
6545 /*
6546 * Request is built from stats_query_header and an array of
6547 * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6548 * rules. The real number of requests is configured in the
6549 * stats_query_header.
6550 */
6551 num_groups =
6552 ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6553 ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
6554
6555 BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6556 sc->fw_stats_num, num_groups);
6557
6558 sc->fw_stats_req_size =
6559 (sizeof(struct stats_query_header) +
6560 (num_groups * sizeof(struct stats_query_cmd_group)));
6561
6562 /*
6563 * Data for statistics requests + stats_counter.
6564 * stats_counter holds per-STORM counters that are incremented when
6565 * STORM has finished with the current request. Memory for FCoE
6566 * offloaded statistics is counted anyway, even if it will not be sent.
6567 * VF stats are not accounted for here as the data of VF stats is stored
6568 * in memory allocated by the VF, not here.
6569 */
6570 sc->fw_stats_data_size =
6571 (sizeof(struct stats_counter) +
6572 sizeof(struct per_port_stats) +
6573 sizeof(struct per_pf_stats) +
6574 /* sizeof(struct fcoe_statistics_params) + */
6575 (sizeof(struct per_queue_stats) * num_queue_stats));
6576
6577 if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6578 &sc->fw_stats_dma, "fw stats") != 0) {
6579 bxe_free_fw_stats_mem(sc);
6580 return (-1);
6581 }
6582
6583 /* set up the shortcuts */
6584
6585 sc->fw_stats_req =
6586 (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6587 sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6588
6589 sc->fw_stats_data =
6590 (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6591 sc->fw_stats_req_size);
6592 sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6593 sc->fw_stats_req_size);
6594
6595 BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6596 (uintmax_t)sc->fw_stats_req_mapping);
6597
6598 BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6599 (uintmax_t)sc->fw_stats_data_mapping);
6600
6601 return (0);
6602 }
6603
6604 /*
6605 * Bits map:
6606 * 0-7 - Engine0 load counter.
6607 * 8-15 - Engine1 load counter.
6608 * 16 - Engine0 RESET_IN_PROGRESS bit.
6609 * 17 - Engine1 RESET_IN_PROGRESS bit.
6610 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active
6611 * function on the engine
6612 * 19 - Engine1 ONE_IS_LOADED.
6613 * 20 - Chip reset flow bit. When set, a non-leader must wait for both
6614 * engines' leaders to complete (check both RESET_IN_PROGRESS bits, not
6615 * just the one belonging to its engine).
6616 */
6617 #define BXE_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
6618 #define BXE_PATH0_LOAD_CNT_MASK 0x000000ff
6619 #define BXE_PATH0_LOAD_CNT_SHIFT 0
6620 #define BXE_PATH1_LOAD_CNT_MASK 0x0000ff00
6621 #define BXE_PATH1_LOAD_CNT_SHIFT 8
6622 #define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
6623 #define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
6624 #define BXE_GLOBAL_RESET_BIT 0x00040000
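/*
 * Example decode (the register value is hypothetical): a readback of
 * 0x00000503 has engine 0 load bits 0x03 and engine 1 load bits 0x05, with
 * neither RESET_IN_PROGRESS bit nor the GLOBAL_RESET bit set.
 */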
6625
6626 /* set the GLOBAL_RESET bit, should be run under rtnl lock */
6627 static void
6628 bxe_set_reset_global(struct bxe_softc *sc)
6629 {
6630 uint32_t val;
6631 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6632 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6633 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
6634 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6635 }
6636
6637 /* clear the GLOBAL_RESET bit, should be run under rtnl lock */
6638 static void
6639 bxe_clear_reset_global(struct bxe_softc *sc)
6640 {
6641 uint32_t val;
6642 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6643 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6644 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
6645 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6646 }
6647
6648 /* checks the GLOBAL_RESET bit, should be run under rtnl lock */
6649 static uint8_t
6650 bxe_reset_is_global(struct bxe_softc *sc)
6651 {
6652 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6653 BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
6654 return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
6655 }
6656
6657 /* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
6658 static void
6659 bxe_set_reset_done(struct bxe_softc *sc)
6660 {
6661 uint32_t val;
6662 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6663 BXE_PATH0_RST_IN_PROG_BIT;
6664
6665 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6666
6667 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6668 /* Clear the bit */
6669 val &= ~bit;
6670 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6671
6672 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6673 }
6674
6675 /* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
6676 static void
6677 bxe_set_reset_in_progress(struct bxe_softc *sc)
6678 {
6679 uint32_t val;
6680 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6681 BXE_PATH0_RST_IN_PROG_BIT;
6682
6683 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6684
6685 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6686 /* Set the bit */
6687 val |= bit;
6688 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6689
6690 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6691 }
6692
6693 /* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
6694 static uint8_t
6695 bxe_reset_is_done(struct bxe_softc *sc,
6696 int engine)
6697 {
6698 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6699 uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
6700 BXE_PATH0_RST_IN_PROG_BIT;
6701
6702 /* return false if bit is set */
6703 return (val & bit) ? FALSE : TRUE;
6704 }
6705
6706 /* get the load status for an engine, should be run under rtnl lock */
6707 static uint8_t
6708 bxe_get_load_status(struct bxe_softc *sc,
6709 int engine)
6710 {
6711 uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
6712 BXE_PATH0_LOAD_CNT_MASK;
6713 uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
6714 BXE_PATH0_LOAD_CNT_SHIFT;
6715 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6716
6717 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6718
6719 val = ((val & mask) >> shift);
6720
6721 BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
6722
6723 return (val != 0);
6724 }
6725
6726 /* set pf load mark */
6727 /* XXX needs to be under rtnl lock */
6728 static void
6729 bxe_set_pf_load(struct bxe_softc *sc)
6730 {
6731 uint32_t val;
6732 uint32_t val1;
6733 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6734 BXE_PATH0_LOAD_CNT_MASK;
6735 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6736 BXE_PATH0_LOAD_CNT_SHIFT;
6737
6738 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6739
6740 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6741 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6742
6743 /* get the current counter value */
6744 val1 = ((val & mask) >> shift);
6745
6746 /* set bit of this PF */
6747 val1 |= (1 << SC_ABS_FUNC(sc));
6748
6749 /* clear the old value */
6750 val &= ~mask;
6751
6752 /* set the new one */
6753 val |= ((val1 << shift) & mask);
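/*
 * Read-modify-write sketch with hypothetical values: on path 0
 * (mask 0x000000ff, shift 0), if the register reads 0x00000001 and this PF
 * is absolute function 2, val1 becomes 0x01 | 0x04 = 0x05 and the register
 * is written back below as 0x00000005; bits outside the mask (the path-1
 * byte and the reset/global bits) are never modified.
 */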
6754
6755 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6756
6757 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6758 }
6759
6760 /* clear pf load mark */
6761 /* XXX needs to be under rtnl lock */
6762 static uint8_t
6763 bxe_clear_pf_load(struct bxe_softc *sc)
6764 {
6765 uint32_t val1, val;
6766 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6767 BXE_PATH0_LOAD_CNT_MASK;
6768 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6769 BXE_PATH0_LOAD_CNT_SHIFT;
6770
6771 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6772 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6773 BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
6774
6775 /* get the current counter value */
6776 val1 = (val & mask) >> shift;
6777
6778 /* clear bit of that PF */
6779 val1 &= ~(1 << SC_ABS_FUNC(sc));
6780
6781 /* clear the old value */
6782 val &= ~mask;
6783
6784 /* set the new one */
6785 val |= ((val1 << shift) & mask);
6786
6787 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6788 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6789 return (val1 != 0);
6790 }
6791
6792 /* send a load request to the MCP and analyze the response */
6793 static int
6794 bxe_nic_load_request(struct bxe_softc *sc,
6795 uint32_t *load_code)
6796 {
6797 /* init fw_seq */
6798 sc->fw_seq =
6799 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
6800 DRV_MSG_SEQ_NUMBER_MASK);
6801
6802 BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
6803
6804 /* get the current FW pulse sequence */
6805 sc->fw_drv_pulse_wr_seq =
6806 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
6807 DRV_PULSE_SEQ_MASK);
6808
6809 BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
6810 sc->fw_drv_pulse_wr_seq);
6811
6812 /* load request */
6813 (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
6814 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
6815
6816 /* if the MCP fails to respond we must abort */
6817 if (!(*load_code)) {
6818 BLOGE(sc, "MCP response failure!\n");
6819 return (-1);
6820 }
6821
6822 /* if MCP refused then must abort */
6823 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6824 BLOGE(sc, "MCP refused load request\n");
6825 return (-1);
6826 }
6827
6828 return (0);
6829 }
6830
6831 /*
6832 * Check whether another PF has already loaded FW to the chip. In
6833 * virtualized environments a PF from another VM may have already
6834 * initialized the device, including loading the FW.
6835 */
6836 static int
6837 bxe_nic_load_analyze_req(struct bxe_softc *sc,
6838 uint32_t load_code)
6839 {
6840 uint32_t my_fw, loaded_fw;
6841
6842 /* is another pf loaded on this engine? */
6843 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
6844 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
6845 /* build my FW version dword */
6846 my_fw = (BCM_5710_FW_MAJOR_VERSION +
6847 (BCM_5710_FW_MINOR_VERSION << 8 ) +
6848 (BCM_5710_FW_REVISION_VERSION << 16) +
6849 (BCM_5710_FW_ENGINEERING_VERSION << 24));
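/*
 * Packing example with made-up version numbers: a firmware 7.13.11.0 would
 * yield my_fw = 7 + (13 << 8) + (11 << 16) + (0 << 24) = 0x000b0d07, which
 * is then compared against the dword read back from XSEM_REG_PRAM below.
 */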
6850
6851 /* read loaded FW from chip */
6852 loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
6853 BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
6854 loaded_fw, my_fw);
6855
6856 /* abort nic load if version mismatch */
6857 if (my_fw != loaded_fw) {
6858 BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
6859 loaded_fw, my_fw);
6860 return (-1);
6861 }
6862 }
6863
6864 return (0);
6865 }
6866
6867 /* mark PMF if applicable */
6868 static void
6869 bxe_nic_load_pmf(struct bxe_softc *sc,
6870 uint32_t load_code)
6871 {
6872 uint32_t ncsi_oem_data_addr;
6873
6874 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6875 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
6876 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
6877 /*
6878 * Barrier here for ordering between the writing to sc->port.pmf here
6879 * and reading it from the periodic task.
6880 */
6881 sc->port.pmf = 1;
6882 mb();
6883 } else {
6884 sc->port.pmf = 0;
6885 }
6886
6887 BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
6888
6889 /* XXX needed? */
6890 if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
6891 if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
6892 ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
6893 if (ncsi_oem_data_addr) {
6894 REG_WR(sc,
6895 (ncsi_oem_data_addr +
6896 offsetof(struct glob_ncsi_oem_data, driver_version)),
6897 0);
6898 }
6899 }
6900 }
6901 }
6902
6903 static void
6904 bxe_read_mf_cfg(struct bxe_softc *sc)
6905 {
6906 int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
6907 int abs_func;
6908 int vn;
6909
6910 if (BXE_NOMCP(sc)) {
6911 return; /* what should be the default value in this case? */
6912 }
6913
6914 /*
6915 * The formula for computing the absolute function number is...
6916 * For 2 port configuration (4 functions per port):
6917 * abs_func = 2 * vn + SC_PORT + SC_PATH
6918 * For 4 port configuration (2 functions per port):
6919 * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
6920 */
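/*
 * Worked example: in 2-port mode (n = 1), vn = 1 on port 1 of path 0 gives
 * abs_func = 1 * (2 * 1 + 1) + 0 = 3; in 4-port mode (n = 2) the same
 * vn/port/path gives abs_func = 2 * (2 * 1 + 1) + 0 = 6, matching the two
 * formulas above.
 */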
6921 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
6922 abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
6923 if (abs_func >= E1H_FUNC_MAX) {
6924 break;
6925 }
6926 sc->devinfo.mf_info.mf_config[vn] =
6927 MFCFG_RD(sc, func_mf_config[abs_func].config);
6928 }
6929
6930 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
6931 FUNC_MF_CFG_FUNC_DISABLED) {
6932 BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
6933 sc->flags |= BXE_MF_FUNC_DIS;
6934 } else {
6935 BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
6936 sc->flags &= ~BXE_MF_FUNC_DIS;
6937 }
6938 }
6939
6940 /* acquire split MCP access lock register */
6941 static int bxe_acquire_alr(struct bxe_softc *sc)
6942 {
6943 uint32_t j, val;
6944
6945 for (j = 0; j < 1000; j++) {
6946 val = (1UL << 31);
6947 REG_WR(sc, GRCBASE_MCP + 0x9c, val);
6948 val = REG_RD(sc, GRCBASE_MCP + 0x9c);
6949 if (val & (1L << 31))
6950 break;
6951
6952 DELAY(5000);
6953 }
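/*
 * Timing note: the loop above retries up to 1000 times with a 5000 usec
 * delay between attempts, so acquiring the lock is given roughly 5 seconds
 * before the failure below is reported.
 */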
6954
6955 if (!(val & (1L << 31))) {
6956 BLOGE(sc, "Cannot acquire MCP access lock register\n");
6957 return (-1);
6958 }
6959
6960 return (0);
6961 }
6962
6963 /* release split MCP access lock register */
6964 static void bxe_release_alr(struct bxe_softc *sc)
6965 {
6966 REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
6967 }
6968
6969 static void
6970 bxe_fan_failure(struct bxe_softc *sc)
6971 {
6972 int port = SC_PORT(sc);
6973 uint32_t ext_phy_config;
6974
6975 /* mark the failure */
6976 ext_phy_config =
6977 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
6978
6979 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6980 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
6981 SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
6982 ext_phy_config);
6983
6984 /* log the failure */
6985 BLOGW(sc, "Fan Failure has caused the driver to shutdown "
6986 "the card to prevent permanent damage. "
6987 "Please contact OEM Support for assistance\n");
6988
6989 /* XXX */
6990 #if 1
6991 bxe_panic(sc, ("Schedule task to handle fan failure\n"));
6992 #else
6993 /*
6994 * Schedule device reset (unload)
6995 * This is due to some boards consuming sufficient power when driver is
6996 * up to overheat if fan fails.
6997 */
6998 bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
6999 schedule_delayed_work(&sc->sp_rtnl_task, 0);
7000 #endif
7001 }
7002
7003 /* this function is called upon a link interrupt */
7004 static void
7005 bxe_link_attn(struct bxe_softc *sc)
7006 {
7007 uint32_t pause_enabled = 0;
7008 struct host_port_stats *pstats;
7009 int cmng_fns;
7010 struct bxe_fastpath *fp;
7011 int i;
7012
7013 /* Make sure that we are synced with the current statistics */
7014 bxe_stats_handle(sc, STATS_EVENT_STOP);
7015 BLOGD(sc, DBG_LOAD, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags);
7016 elink_link_update(&sc->link_params, &sc->link_vars);
7017
7018 if (sc->link_vars.link_up) {
7019
7020 /* dropless flow control */
7021 if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
7022 pause_enabled = 0;
7023
7024 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7025 pause_enabled = 1;
7026 }
7027
7028 REG_WR(sc,
7029 (BAR_USTRORM_INTMEM +
7030 USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
7031 pause_enabled);
7032 }
7033
7034 if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7035 pstats = BXE_SP(sc, port_stats);
7036 /* reset old mac stats */
7037 memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7038 }
7039
7040 if (sc->state == BXE_STATE_OPEN) {
7041 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
7042 /* Restart tx when the link comes back. */
7043 FOR_EACH_ETH_QUEUE(sc, i) {
7044 fp = &sc->fp[i];
7045 taskqueue_enqueue(fp->tq, &fp->tx_task);
7046 }
7047 }
7048
7049 }
7050
7051 if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7052 cmng_fns = bxe_get_cmng_fns_mode(sc);
7053
7054 if (cmng_fns != CMNG_FNS_NONE) {
7055 bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7056 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7057 } else {
7058 /* rate shaping and fairness are disabled */
7059 BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7060 }
7061 }
7062
7063 bxe_link_report_locked(sc);
7064
7065 if (IS_MF(sc)) {
7066 ; // XXX bxe_link_sync_notify(sc);
7067 }
7068 }
7069
7070 static void
7071 bxe_attn_int_asserted(struct bxe_softc *sc,
7072 uint32_t asserted)
7073 {
7074 int port = SC_PORT(sc);
7075 uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7076 MISC_REG_AEU_MASK_ATTN_FUNC_0;
7077 uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7078 NIG_REG_MASK_INTERRUPT_PORT0;
7079 uint32_t aeu_mask;
7080 uint32_t nig_mask = 0;
7081 uint32_t reg_addr;
7082 uint32_t igu_acked;
7083 uint32_t cnt;
7084
7085 if (sc->attn_state & asserted) {
7086 BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7087 }
7088
7089 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7090
7091 aeu_mask = REG_RD(sc, aeu_addr);
7092
7093 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7094 aeu_mask, asserted);
7095
7096 aeu_mask &= ~(asserted & 0x3ff);
7097
7098 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7099
7100 REG_WR(sc, aeu_addr, aeu_mask);
7101
7102 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7103
7104 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7105 sc->attn_state |= asserted;
7106 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7107
7108 if (asserted & ATTN_HARD_WIRED_MASK) {
7109 if (asserted & ATTN_NIG_FOR_FUNC) {
7110
7111 bxe_acquire_phy_lock(sc);
7112 /* save nig interrupt mask */
7113 nig_mask = REG_RD(sc, nig_int_mask_addr);
7114
7115 /* If nig_mask is not set, no need to call the update function */
7116 if (nig_mask) {
7117 REG_WR(sc, nig_int_mask_addr, 0);
7118
7119 bxe_link_attn(sc);
7120 }
7121
7122 /* handle unicore attn? */
7123 }
7124
7125 if (asserted & ATTN_SW_TIMER_4_FUNC) {
7126 BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7127 }
7128
7129 if (asserted & GPIO_2_FUNC) {
7130 BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7131 }
7132
7133 if (asserted & GPIO_3_FUNC) {
7134 BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7135 }
7136
7137 if (asserted & GPIO_4_FUNC) {
7138 BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7139 }
7140
7141 if (port == 0) {
7142 if (asserted & ATTN_GENERAL_ATTN_1) {
7143 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7144 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7145 }
7146 if (asserted & ATTN_GENERAL_ATTN_2) {
7147 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7148 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7149 }
7150 if (asserted & ATTN_GENERAL_ATTN_3) {
7151 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7152 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7153 }
7154 } else {
7155 if (asserted & ATTN_GENERAL_ATTN_4) {
7156 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7157 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7158 }
7159 if (asserted & ATTN_GENERAL_ATTN_5) {
7160 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7161 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7162 }
7163 if (asserted & ATTN_GENERAL_ATTN_6) {
7164 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7165 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7166 }
7167 }
7168 } /* hardwired */
7169
7170 if (sc->devinfo.int_block == INT_BLOCK_HC) {
7171 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7172 } else {
7173 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7174 }
7175
7176 BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7177 asserted,
7178 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7179 REG_WR(sc, reg_addr, asserted);
7180
7181 /* now set back the mask */
7182 if (asserted & ATTN_NIG_FOR_FUNC) {
7183 /*
7184 * Verify that IGU ack through BAR was written before restoring
7185 * NIG mask. This loop should exit after 2-3 iterations max.
7186 */
7187 if (sc->devinfo.int_block != INT_BLOCK_HC) {
7188 cnt = 0;
7189
7190 do {
7191 igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7192 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7193 (++cnt < MAX_IGU_ATTN_ACK_TO));
7194
7195 if (!igu_acked) {
7196 BLOGE(sc, "Failed to verify IGU ack on time\n");
7197 }
7198
7199 mb();
7200 }
7201
7202 REG_WR(sc, nig_int_mask_addr, nig_mask);
7203
7204 bxe_release_phy_lock(sc);
7205 }
7206 }
7207
7208 static void
7209 bxe_print_next_block(struct bxe_softc *sc,
7210 int idx,
7211 const char *blk)
7212 {
7213 BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7214 }
7215
7216 static int
7217 bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7218 uint32_t sig,
7219 int par_num,
7220 uint8_t print)
7221 {
7222 uint32_t cur_bit = 0;
7223 int i = 0;
7224
7225 for (i = 0; sig; i++) {
7226 cur_bit = ((uint32_t)0x1 << i);
7227 if (sig & cur_bit) {
7228 switch (cur_bit) {
7229 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7230 if (print)
7231 bxe_print_next_block(sc, par_num++, "BRB");
7232 break;
7233 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7234 if (print)
7235 bxe_print_next_block(sc, par_num++, "PARSER");
7236 break;
7237 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7238 if (print)
7239 bxe_print_next_block(sc, par_num++, "TSDM");
7240 break;
7241 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7242 if (print)
7243 bxe_print_next_block(sc, par_num++, "SEARCHER");
7244 break;
7245 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7246 if (print)
7247 bxe_print_next_block(sc, par_num++, "TCM");
7248 break;
7249 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7250 if (print)
7251 bxe_print_next_block(sc, par_num++, "TSEMI");
7252 break;
7253 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7254 if (print)
7255 bxe_print_next_block(sc, par_num++, "XPB");
7256 break;
7257 }
7258
7259 /* Clear the bit */
7260 sig &= ~cur_bit;
7261 }
7262 }
7263
7264 return (par_num);
7265 }
7266
7267 static int
7268 bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7269 uint32_t sig,
7270 int par_num,
7271 uint8_t *global,
7272 uint8_t print)
7273 {
7274 int i = 0;
7275 uint32_t cur_bit = 0;
7276 for (i = 0; sig; i++) {
7277 cur_bit = ((uint32_t)0x1 << i);
7278 if (sig & cur_bit) {
7279 switch (cur_bit) {
7280 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7281 if (print)
7282 bxe_print_next_block(sc, par_num++, "PBF");
7283 break;
7284 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7285 if (print)
7286 bxe_print_next_block(sc, par_num++, "QM");
7287 break;
7288 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7289 if (print)
7290 bxe_print_next_block(sc, par_num++, "TM");
7291 break;
7292 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7293 if (print)
7294 bxe_print_next_block(sc, par_num++, "XSDM");
7295 break;
7296 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7297 if (print)
7298 bxe_print_next_block(sc, par_num++, "XCM");
7299 break;
7300 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7301 if (print)
7302 bxe_print_next_block(sc, par_num++, "XSEMI");
7303 break;
7304 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7305 if (print)
7306 bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7307 break;
7308 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7309 if (print)
7310 bxe_print_next_block(sc, par_num++, "NIG");
7311 break;
7312 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7313 if (print)
7314 bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7315 *global = TRUE;
7316 break;
7317 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7318 if (print)
7319 bxe_print_next_block(sc, par_num++, "DEBUG");
7320 break;
7321 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7322 if (print)
7323 bxe_print_next_block(sc, par_num++, "USDM");
7324 break;
7325 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7326 if (print)
7327 bxe_print_next_block(sc, par_num++, "UCM");
7328 break;
7329 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7330 if (print)
7331 bxe_print_next_block(sc, par_num++, "USEMI");
7332 break;
7333 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7334 if (print)
7335 bxe_print_next_block(sc, par_num++, "UPB");
7336 break;
7337 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7338 if (print)
7339 bxe_print_next_block(sc, par_num++, "CSDM");
7340 break;
7341 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7342 if (print)
7343 bxe_print_next_block(sc, par_num++, "CCM");
7344 break;
7345 }
7346
7347 /* Clear the bit */
7348 sig &= ~cur_bit;
7349 }
7350 }
7351
7352 return (par_num);
7353 }
7354
7355 static int
7356 bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7357 uint32_t sig,
7358 int par_num,
7359 uint8_t print)
7360 {
7361 uint32_t cur_bit = 0;
7362 int i = 0;
7363
7364 for (i = 0; sig; i++) {
7365 cur_bit = ((uint32_t)0x1 << i);
7366 if (sig & cur_bit) {
7367 switch (cur_bit) {
7368 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7369 if (print)
7370 bxe_print_next_block(sc, par_num++, "CSEMI");
7371 break;
7372 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7373 if (print)
7374 bxe_print_next_block(sc, par_num++, "PXP");
7375 break;
7376 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7377 if (print)
7378 bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7379 break;
7380 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7381 if (print)
7382 bxe_print_next_block(sc, par_num++, "CFC");
7383 break;
7384 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7385 if (print)
7386 bxe_print_next_block(sc, par_num++, "CDU");
7387 break;
7388 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7389 if (print)
7390 bxe_print_next_block(sc, par_num++, "DMAE");
7391 break;
7392 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7393 if (print)
7394 bxe_print_next_block(sc, par_num++, "IGU");
7395 break;
7396 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7397 if (print)
7398 bxe_print_next_block(sc, par_num++, "MISC");
7399 break;
7400 }
7401
7402 /* Clear the bit */
7403 sig &= ~cur_bit;
7404 }
7405 }
7406
7407 return (par_num);
7408 }
7409
7410 static int
7411 bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7412 uint32_t sig,
7413 int par_num,
7414 uint8_t *global,
7415 uint8_t print)
7416 {
7417 uint32_t cur_bit = 0;
7418 int i = 0;
7419
7420 for (i = 0; sig; i++) {
7421 cur_bit = ((uint32_t)0x1 << i);
7422 if (sig & cur_bit) {
7423 switch (cur_bit) {
7424 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7425 if (print)
7426 bxe_print_next_block(sc, par_num++, "MCP ROM");
7427 *global = TRUE;
7428 break;
7429 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7430 if (print)
7431 bxe_print_next_block(sc, par_num++,
7432 "MCP UMP RX");
7433 *global = TRUE;
7434 break;
7435 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7436 if (print)
7437 bxe_print_next_block(sc, par_num++,
7438 "MCP UMP TX");
7439 *global = TRUE;
7440 break;
7441 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7442 if (print)
7443 bxe_print_next_block(sc, par_num++,
7444 "MCP SCPAD");
7445 *global = TRUE;
7446 break;
7447 }
7448
7449 /* Clear the bit */
7450 sig &= ~cur_bit;
7451 }
7452 }
7453
7454 return (par_num);
7455 }
7456
7457 static int
7458 bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7459 uint32_t sig,
7460 int par_num,
7461 uint8_t print)
7462 {
7463 uint32_t cur_bit = 0;
7464 int i = 0;
7465
7466 for (i = 0; sig; i++) {
7467 cur_bit = ((uint32_t)0x1 << i);
7468 if (sig & cur_bit) {
7469 switch (cur_bit) {
7470 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7471 if (print)
7472 bxe_print_next_block(sc, par_num++, "PGLUE_B");
7473 break;
7474 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7475 if (print)
7476 bxe_print_next_block(sc, par_num++, "ATC");
7477 break;
7478 }
7479
7480 /* Clear the bit */
7481 sig &= ~cur_bit;
7482 }
7483 }
7484
7485 return (par_num);
7486 }
7487
7488 static uint8_t
7489 bxe_parity_attn(struct bxe_softc *sc,
7490 uint8_t *global,
7491 uint8_t print,
7492 uint32_t *sig)
7493 {
7494 int par_num = 0;
7495
7496 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7497 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7498 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7499 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7500 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7501 BLOGE(sc, "Parity error: HW block parity attention:\n"
7502 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7503 (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7504 (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7505 (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7506 (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7507 (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7508
7509 if (print)
7510 BLOGI(sc, "Parity errors detected in blocks: ");
7511
7512 par_num =
7513 bxe_check_blocks_with_parity0(sc, sig[0] &
7514 HW_PRTY_ASSERT_SET_0,
7515 par_num, print);
7516 par_num =
7517 bxe_check_blocks_with_parity1(sc, sig[1] &
7518 HW_PRTY_ASSERT_SET_1,
7519 par_num, global, print);
7520 par_num =
7521 bxe_check_blocks_with_parity2(sc, sig[2] &
7522 HW_PRTY_ASSERT_SET_2,
7523 par_num, print);
7524 par_num =
7525 bxe_check_blocks_with_parity3(sc, sig[3] &
7526 HW_PRTY_ASSERT_SET_3,
7527 par_num, global, print);
7528 par_num =
7529 bxe_check_blocks_with_parity4(sc, sig[4] &
7530 HW_PRTY_ASSERT_SET_4,
7531 par_num, print);
7532
7533 if (print)
7534 BLOGI(sc, "\n");
7535
7536 if (*global == TRUE) {
7537 BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL);
7538 }
7539
7540 return (TRUE);
7541 }
7542
7543 return (FALSE);
7544 }
7545
7546 static uint8_t
7547 bxe_chk_parity_attn(struct bxe_softc *sc,
7548 uint8_t *global,
7549 uint8_t print)
7550 {
7551 struct attn_route attn = { {0} };
7552 int port = SC_PORT(sc);
7553
7554 if (sc->state != BXE_STATE_OPEN)
7555 return (FALSE);
7556
7557 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7558 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7559 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7560 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7561
7562 /*
7563 * Since MCP attentions can't be disabled inside the block, we need to
7564 * read AEU registers to see whether they're currently disabled
7565 */
7566 attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
7567 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
7568 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
7569 ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
7570
7571
7572 if (!CHIP_IS_E1x(sc))
7573 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7574
7575 return (bxe_parity_attn(sc, global, print, attn.sig));
7576 }
7577
7578 static void
7579 bxe_attn_int_deasserted4(struct bxe_softc *sc,
7580 uint32_t attn)
7581 {
7582 uint32_t val;
7583 boolean_t err_flg = FALSE;
7584
7585 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7586 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7587 BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7588 err_flg = TRUE;
7589 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7590 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7591 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7592 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7593 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7594 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7595 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7596 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7597 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7598 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7599 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7600 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7601 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7602 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7603 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7604 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7605 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7606 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7607 }
7608
7609 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7610 val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7611 BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7612 err_flg = TRUE;
7613 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7614 BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7615 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7616 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7617 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7618 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7619 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7620 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7621 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7622 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7623 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7624 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7625 }
7626
7627 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7628 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7629 BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7630 (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7631 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7632 err_flg = TRUE;
7633 }
7634 if (err_flg) {
7635 BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
7636 taskqueue_enqueue_timeout(taskqueue_thread,
7637 &sc->sp_err_timeout_task, hz/10);
7638 }
7639
7640 }
7641
7642 static void
7643 bxe_e1h_disable(struct bxe_softc *sc)
7644 {
7645 int port = SC_PORT(sc);
7646
7647 bxe_tx_disable(sc);
7648
7649 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7650 }
7651
7652 static void
7653 bxe_e1h_enable(struct bxe_softc *sc)
7654 {
7655 int port = SC_PORT(sc);
7656
7657 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7658
7659 // XXX bxe_tx_enable(sc);
7660 }
7661
7662 /*
7663 * called due to MCP event (on pmf):
7664 * reread new bandwidth configuration
7665 * configure FW
7666 * notify other functions about the change
7667 */
7668 static void
7669 bxe_config_mf_bw(struct bxe_softc *sc)
7670 {
7671 if (sc->link_vars.link_up) {
7672 bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
7673 // XXX bxe_link_sync_notify(sc);
7674 }
7675
7676 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7677 }
7678
7679 static void
7680 bxe_set_mf_bw(struct bxe_softc *sc)
7681 {
7682 bxe_config_mf_bw(sc);
7683 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
7684 }
7685
7686 static void
7687 bxe_handle_eee_event(struct bxe_softc *sc)
7688 {
7689 BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
7690 bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
7691 }
7692
7693 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
7694
7695 static void
7696 bxe_drv_info_ether_stat(struct bxe_softc *sc)
7697 {
7698 struct eth_stats_info *ether_stat =
7699 &sc->sp->drv_info_to_mcp.ether_stat;
7700
7701 strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
7702 ETH_STAT_INFO_VERSION_LEN);
7703
7704 /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
7705 sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
7706 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
7707 ether_stat->mac_local + MAC_PAD,
7708 MAC_PAD, ETH_ALEN);
7709
7710 ether_stat->mtu_size = sc->mtu;
7711
7712 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
7713 if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
7714 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
7715 }
7716
7717 // XXX ether_stat->feature_flags |= ???;
7718
7719 ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
7720
7721 ether_stat->txq_size = sc->tx_ring_size;
7722 ether_stat->rxq_size = sc->rx_ring_size;
7723 }
7724
7725 static void
7726 bxe_handle_drv_info_req(struct bxe_softc *sc)
7727 {
7728 enum drv_info_opcode op_code;
7729 uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
7730
7731 /* if drv_info version supported by MFW doesn't match - send NACK */
7732 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
7733 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7734 return;
7735 }
7736
7737 op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
7738 DRV_INFO_CONTROL_OP_CODE_SHIFT);
7739
7740 memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
7741
7742 switch (op_code) {
7743 case ETH_STATS_OPCODE:
7744 bxe_drv_info_ether_stat(sc);
7745 break;
7746 case FCOE_STATS_OPCODE:
7747 case ISCSI_STATS_OPCODE:
7748 default:
7749 /* if op code isn't supported - send NACK */
7750 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7751 return;
7752 }
7753
7754 /*
7755 * If we got drv_info attn from MFW then these fields are defined in
7756 * shmem2 for sure
7757 */
7758 SHMEM2_WR(sc, drv_info_host_addr_lo,
7759 U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7760 SHMEM2_WR(sc, drv_info_host_addr_hi,
7761 U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7762
7763 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
7764 }
7765
7766 static void
7767 bxe_dcc_event(struct bxe_softc *sc,
7768 uint32_t dcc_event)
7769 {
7770 BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
7771
7772 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
7773 /*
7774 * This is the only place besides the function initialization
7775 * where the sc->flags can change so it is done without any
7776 * locks
7777 */
7778 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
7779 BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
7780 sc->flags |= BXE_MF_FUNC_DIS;
7781 bxe_e1h_disable(sc);
7782 } else {
7783 BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
7784 sc->flags &= ~BXE_MF_FUNC_DIS;
7785 bxe_e1h_enable(sc);
7786 }
7787 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
7788 }
7789
7790 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
7791 bxe_config_mf_bw(sc);
7792 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
7793 }
7794
7795 /* Report results to MCP */
7796 if (dcc_event)
7797 bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
7798 else
7799 bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
7800 }
7801
7802 static void
7803 bxe_pmf_update(struct bxe_softc *sc)
7804 {
7805 int port = SC_PORT(sc);
7806 uint32_t val;
7807
7808 sc->port.pmf = 1;
7809 BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
7810
7811 /*
7812 * We need the mb() to ensure the ordering between the writing to
7813 * sc->port.pmf here and reading it from the bxe_periodic_task().
7814 */
7815 mb();
7816
7817 /* queue a periodic task */
7818 // XXX schedule task...
7819
7820 // XXX bxe_dcbx_pmf_update(sc);
7821
7822 /* enable nig attention */
7823 val = (0xff0f | (1 << (SC_VN(sc) + 4)));
7824 if (sc->devinfo.int_block == INT_BLOCK_HC) {
7825 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
7826 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
7827 } else if (!CHIP_IS_E1x(sc)) {
7828 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
7829 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
7830 }
7831
7832 bxe_stats_handle(sc, STATS_EVENT_PMF);
7833 }
7834
7835 static int
7836 bxe_mc_assert(struct bxe_softc *sc)
7837 {
7838 char last_idx;
7839 int i, rc = 0;
7840 uint32_t row0, row1, row2, row3;
7841
7842 /* XSTORM */
7843 last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
7844 if (last_idx)
7845 BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7846
7847 /* print the asserts */
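/* each assert list entry is four dwords; a row0 value of
 * COMMON_ASM_INVALID_ASSERT_OPCODE marks the end of the valid entries */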
7848 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7849
7850 row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
7851 row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
7852 row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
7853 row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
7854
7855 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7856 BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7857 i, row3, row2, row1, row0);
7858 rc++;
7859 } else {
7860 break;
7861 }
7862 }
7863
7864 /* TSTORM */
7865 last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
7866 if (last_idx) {
7867 BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7868 }
7869
7870 /* print the asserts */
7871 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7872
7873 row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
7874 row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
7875 row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
7876 row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
7877
7878 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7879 BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7880 i, row3, row2, row1, row0);
7881 rc++;
7882 } else {
7883 break;
7884 }
7885 }
7886
7887 /* CSTORM */
7888 last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
7889 if (last_idx) {
7890 BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7891 }
7892
7893 /* print the asserts */
7894 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7895
7896 row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
7897 row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
7898 row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
7899 row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
7900
7901 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7902 BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7903 i, row3, row2, row1, row0);
7904 rc++;
7905 } else {
7906 break;
7907 }
7908 }
7909
7910 /* USTORM */
7911 last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
7912 if (last_idx) {
7913 BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7914 }
7915
7916 /* print the asserts */
7917 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7918
7919 row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
7920 row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
7921 row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
7922 row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
7923
7924 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7925 BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7926 i, row3, row2, row1, row0);
7927 rc++;
7928 } else {
7929 break;
7930 }
7931 }
7932
7933 return (rc);
7934 }
7935
7936 static void
7937 bxe_attn_int_deasserted3(struct bxe_softc *sc,
7938 uint32_t attn)
7939 {
7940 int func = SC_FUNC(sc);
7941 uint32_t val;
7942
7943 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
7944
7945 if (attn & BXE_PMF_LINK_ASSERT(sc)) {
7946
7947 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7948 bxe_read_mf_cfg(sc);
7949 sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
7950 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
7951 val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
7952
7953 if (val & DRV_STATUS_DCC_EVENT_MASK)
7954 bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
7955
7956 if (val & DRV_STATUS_SET_MF_BW)
7957 bxe_set_mf_bw(sc);
7958
7959 if (val & DRV_STATUS_DRV_INFO_REQ)
7960 bxe_handle_drv_info_req(sc);
7961
7962 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
7963 bxe_pmf_update(sc);
7964
7965 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
7966 bxe_handle_eee_event(sc);
7967
7968 if (sc->link_vars.periodic_flags &
7969 ELINK_PERIODIC_FLAGS_LINK_EVENT) {
7970 /* sync with link */
7971 bxe_acquire_phy_lock(sc);
7972 sc->link_vars.periodic_flags &=
7973 ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
7974 bxe_release_phy_lock(sc);
7975 if (IS_MF(sc))
7976 ; // XXX bxe_link_sync_notify(sc);
7977 bxe_link_report(sc);
7978 }
7979
7980 /*
7981 * Always call it here: bxe_link_report() will
7982 * prevent the link indication duplication.
7983 */
7984 bxe_link_status_update(sc);
7985
7986 } else if (attn & BXE_MC_ASSERT_BITS) {
7987
7988 BLOGE(sc, "MC assert!\n");
7989 bxe_mc_assert(sc);
7990 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
7991 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
7992 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
7993 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
7994 bxe_int_disable(sc);
7995 BXE_SET_ERROR_BIT(sc, BXE_ERR_MC_ASSERT);
7996 taskqueue_enqueue_timeout(taskqueue_thread,
7997 &sc->sp_err_timeout_task, hz/10);
7998
7999 } else if (attn & BXE_MCP_ASSERT) {
8000
8001 BLOGE(sc, "MCP assert!\n");
8002 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
8003 BXE_SET_ERROR_BIT(sc, BXE_ERR_MCP_ASSERT);
8004 taskqueue_enqueue_timeout(taskqueue_thread,
8005 &sc->sp_err_timeout_task, hz/10);
8006 bxe_int_disable(sc); /* avoid repetitive assert alerts */
8007
8008
8009 } else {
8010 BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
8011 }
8012 }
8013
8014 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
8015 BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
8016 if (attn & BXE_GRC_TIMEOUT) {
8017 val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
8018 BLOGE(sc, "GRC time-out 0x%08x\n", val);
8019 }
8020 if (attn & BXE_GRC_RSV) {
8021 val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
8022 BLOGE(sc, "GRC reserved 0x%08x\n", val);
8023 }
8024 REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
8025 }
8026 }
8027
8028 static void
8029 bxe_attn_int_deasserted2(struct bxe_softc *sc,
8030 uint32_t attn)
8031 {
8032 int port = SC_PORT(sc);
8033 int reg_offset;
8034 uint32_t val0, mask0, val1, mask1;
8035 uint32_t val;
8036 boolean_t err_flg = FALSE;
8037
8038 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
8039 val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
8040 BLOGE(sc, "CFC hw attention 0x%08x\n", val);
8041 /* CFC error attention */
8042 if (val & 0x2) {
8043 BLOGE(sc, "FATAL error from CFC\n");
8044 err_flg = TRUE;
8045 }
8046 }
8047
8048 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8049 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8050 BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8051 /* RQ_USDMDP_FIFO_OVERFLOW */
8052 if (val & 0x18000) {
8053 BLOGE(sc, "FATAL error from PXP\n");
8054 err_flg = TRUE;
8055 }
8056
8057 if (!CHIP_IS_E1x(sc)) {
8058 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8059 BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
8060 err_flg = TRUE;
8061 }
8062 }
8063
8064 #define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8065 #define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8066
8067 if (attn & AEU_PXP2_HW_INT_BIT) {
8068 /* CQ47854 workaround do not panic on
8069 * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8070 */
8071 if (!CHIP_IS_E1x(sc)) {
8072 mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8073 val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8074 mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8075 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
8076 /*
8077 * If PXP2_EOP_ERROR_BIT is the only bit set in
8078 * STS0 and STS1, clear it.
8079 *
8080 * We may lose additional attentions between reading
8081 * STS0 and STS_CLR0; in that case the user will not
8082 * be notified about them.
8083 */
8084 if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8085 !(val1 & mask1))
8086 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8087
8088 /* print the register, since no one can restore it */
8089 BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8090
8091 /*
8092 * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8093 * then notify
8094 */
8095 if (val0 & PXP2_EOP_ERROR_BIT) {
8096 BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
8097 err_flg = TRUE;
8098
8099 /*
8100 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8101 * set then clear attention from PXP2 block without panic
8102 */
8103 if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8104 ((val1 & mask1) == 0))
8105 attn &= ~AEU_PXP2_HW_INT_BIT;
8106 }
8107 }
8108 }
8109
8110 if (attn & HW_INTERRUT_ASSERT_SET_2) {
8111 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8112 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8113
8114 val = REG_RD(sc, reg_offset);
8115 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8116 REG_WR(sc, reg_offset, val);
8117
8118 BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8119 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
8120 err_flg = TRUE;
8121 bxe_panic(sc, ("HW block attention set2\n"));
8122 }
8123 if (err_flg) {
8124 BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL);
8125 taskqueue_enqueue_timeout(taskqueue_thread,
8126 &sc->sp_err_timeout_task, hz/10);
8127 }
8128
8129 }
8130
8131 static void
8132 bxe_attn_int_deasserted1(struct bxe_softc *sc,
8133 uint32_t attn)
8134 {
8135 int port = SC_PORT(sc);
8136 int reg_offset;
8137 uint32_t val;
8138 boolean_t err_flg = FALSE;
8139
8140 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8141 val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8142 BLOGE(sc, "DB hw attention 0x%08x\n", val);
8143 /* DORQ discard attention */
8144 if (val & 0x2) {
8145 BLOGE(sc, "FATAL error from DORQ\n");
8146 err_flg = TRUE;
8147 }
8148 }
8149
8150 if (attn & HW_INTERRUT_ASSERT_SET_1) {
8151 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8152 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8153
8154 val = REG_RD(sc, reg_offset);
8155 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8156 REG_WR(sc, reg_offset, val);
8157
8158 BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8159 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8160 err_flg = TRUE;
8161 bxe_panic(sc, ("HW block attention set1\n"));
8162 }
8163 if (err_flg) {
8164 BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
8165 taskqueue_enqueue_timeout(taskqueue_thread,
8166 &sc->sp_err_timeout_task, hz/10);
8167 }
8168
8169 }
8170
8171 static void
8172 bxe_attn_int_deasserted0(struct bxe_softc *sc,
8173 uint32_t attn)
8174 {
8175 int port = SC_PORT(sc);
8176 int reg_offset;
8177 uint32_t val;
8178
8179 reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8180 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8181
8182 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8183 val = REG_RD(sc, reg_offset);
8184 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8185 REG_WR(sc, reg_offset, val);
8186
8187 BLOGW(sc, "SPIO5 hw attention\n");
8188
8189 /* Fan failure attention */
8190 elink_hw_reset_phy(&sc->link_params);
8191 bxe_fan_failure(sc);
8192 }
8193
8194 if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8195 bxe_acquire_phy_lock(sc);
8196 elink_handle_module_detect_int(&sc->link_params);
8197 bxe_release_phy_lock(sc);
8198 }
8199
8200 if (attn & HW_INTERRUT_ASSERT_SET_0) {
8201 val = REG_RD(sc, reg_offset);
8202 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8203 REG_WR(sc, reg_offset, val);
8204
8205
8206 BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
8207 taskqueue_enqueue_timeout(taskqueue_thread,
8208 &sc->sp_err_timeout_task, hz/10);
8209
8210 bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n",
8211 (attn & HW_INTERRUT_ASSERT_SET_0)));
8212 }
8213 }
8214
8215 static void
8216 bxe_attn_int_deasserted(struct bxe_softc *sc,
8217 uint32_t deasserted)
8218 {
8219 struct attn_route attn;
8220 struct attn_route *group_mask;
8221 int port = SC_PORT(sc);
8222 int index;
8223 uint32_t reg_addr;
8224 uint32_t val;
8225 uint32_t aeu_mask;
8226 uint8_t global = FALSE;
8227
8228 /*
8229 * Need to take HW lock because MCP or other port might also
8230 * try to handle this event.
8231 */
8232 bxe_acquire_alr(sc);
8233
8234 if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8235 /* XXX
8236 * In case of parity errors don't handle attentions so that
8237 * other function would "see" parity errors.
8238 */
8239 // XXX schedule a recovery task...
8240 /* disable HW interrupts */
8241 bxe_int_disable(sc);
8242 BXE_SET_ERROR_BIT(sc, BXE_ERR_PARITY);
8243 taskqueue_enqueue_timeout(taskqueue_thread,
8244 &sc->sp_err_timeout_task, hz/10);
8245 bxe_release_alr(sc);
8246 return;
8247 }
8248
8249 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8250 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8251 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8252 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8253 if (!CHIP_IS_E1x(sc)) {
8254 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8255 } else {
8256 attn.sig[4] = 0;
8257 }
8258
8259 BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8260 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8261
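/*
 * Each deasserted bit selects one of the dynamic attention groups.
 * Dispatch every attention register, masked by that group's routing,
 * to its per-register handler.
 */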
8262 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8263 if (deasserted & (1 << index)) {
8264 group_mask = &sc->attn_group[index];
8265
8266 BLOGD(sc, DBG_INTR,
8267 "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8268 group_mask->sig[0], group_mask->sig[1],
8269 group_mask->sig[2], group_mask->sig[3],
8270 group_mask->sig[4]);
8271
8272 bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8273 bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8274 bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8275 bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8276 bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8277 }
8278 }
8279
8280 bxe_release_alr(sc);
8281
8282 if (sc->devinfo.int_block == INT_BLOCK_HC) {
8283 reg_addr = (HC_REG_COMMAND_REG + port*32 +
8284 COMMAND_REG_ATTN_BITS_CLR);
8285 } else {
8286 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8287 }
8288
8289 val = ~deasserted;
8290 BLOGD(sc, DBG_INTR,
8291 "about to mask 0x%08x at %s addr 0x%08x\n", val,
8292 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8293 REG_WR(sc, reg_addr, val);
8294
8295 if (~sc->attn_state & deasserted) {
8296 BLOGE(sc, "IGU error\n");
8297 }
8298
8299 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8300 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8301
8302 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8303
8304 aeu_mask = REG_RD(sc, reg_addr);
8305
8306 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8307 aeu_mask, deasserted);
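/* re-enable (unmask) the attention lines that have just deasserted so
 * that future assertions on them are delivered again */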
8308 aeu_mask |= (deasserted & 0x3ff);
8309 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8310
8311 REG_WR(sc, reg_addr, aeu_mask);
8312 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8313
8314 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8315 sc->attn_state &= ~deasserted;
8316 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8317 }
8318
8319 static void
8320 bxe_attn_int(struct bxe_softc *sc)
8321 {
8322 /* read local copy of bits */
8323 uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8324 uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8325 uint32_t attn_state = sc->attn_state;
8326
8327 /* look for changed bits */
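/*
 * asserted:   bits newly raised by the chip that are neither acked nor
 *             tracked in the driver's attn_state yet.
 * deasserted: bits the chip has dropped while still acked and tracked,
 *             i.e. attentions that have now cleared.
 */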
8328 uint32_t asserted = attn_bits & ~attn_ack & ~attn_state;
8329 uint32_t deasserted = ~attn_bits & attn_ack & attn_state;
8330
8331 BLOGD(sc, DBG_INTR,
8332 "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8333 attn_bits, attn_ack, asserted, deasserted);
8334
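/*
 * A bit is "bad" if it is stable (attn_bits == attn_ack) yet disagrees
 * with the driver's cached attn_state, i.e. the hardware and software
 * views of the attention lines are out of sync.
 */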
8335 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8336 BLOGE(sc, "BAD attention state\n");
8337 }
8338
8339 /* handle bits that were raised */
8340 if (asserted) {
8341 bxe_attn_int_asserted(sc, asserted);
8342 }
8343
8344 if (deasserted) {
8345 bxe_attn_int_deasserted(sc, deasserted);
8346 }
8347 }
8348
8349 static uint16_t
8350 bxe_update_dsb_idx(struct bxe_softc *sc)
8351 {
8352 struct host_sp_status_block *def_sb = sc->def_sb;
8353 uint16_t rc = 0;
8354
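/*
 * Compare the cached default status block indices against the copies
 * last written by the chip and return a bitmask of which ones advanced.
 */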
8355 mb(); /* status block is written to by the chip */
8356
8357 if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8358 sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8359 rc |= BXE_DEF_SB_ATT_IDX;
8360 }
8361
8362 if (sc->def_idx != def_sb->sp_sb.running_index) {
8363 sc->def_idx = def_sb->sp_sb.running_index;
8364 rc |= BXE_DEF_SB_IDX;
8365 }
8366
8367 mb();
8368
8369 return (rc);
8370 }
8371
8372 static inline struct ecore_queue_sp_obj *
8373 bxe_cid_to_q_obj(struct bxe_softc *sc,
8374 uint32_t cid)
8375 {
8376 BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8377 return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8378 }
8379
8380 static void
8381 bxe_handle_mcast_eqe(struct bxe_softc *sc)
8382 {
8383 struct ecore_mcast_ramrod_params rparam;
8384 int rc;
8385
8386 memset(&rparam, 0, sizeof(rparam));
8387
8388 rparam.mcast_obj = &sc->mcast_obj;
8389
8390 BXE_MCAST_LOCK(sc);
8391
8392 /* clear pending state for the last command */
8393 sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8394
8395 /* if there are pending mcast commands - send them */
8396 if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8397 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8398 if (rc < 0) {
8399 BLOGD(sc, DBG_SP,
8400 "ERROR: Failed to send pending mcast commands (%d)\n", rc);
8401 }
8402 }
8403
8404 BXE_MCAST_UNLOCK(sc);
8405 }
8406
8407 static void
8408 bxe_handle_classification_eqe(struct bxe_softc *sc,
8409 union event_ring_elem *elem)
8410 {
8411 unsigned long ramrod_flags = 0;
8412 int rc = 0;
8413 uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8414 struct ecore_vlan_mac_obj *vlan_mac_obj;
8415
8416 /* always push next commands out, don't wait here */
8417 bit_set(&ramrod_flags, RAMROD_CONT);
8418
8419 switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8420 case ECORE_FILTER_MAC_PENDING:
8421 BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8422 vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8423 break;
8424
8425 case ECORE_FILTER_MCAST_PENDING:
8426 BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8427 /*
8428 * This is only relevant for 57710 where multicast MACs are
8429 * configured as unicast MACs using the same ramrod.
8430 */
8431 bxe_handle_mcast_eqe(sc);
8432 return;
8433
8434 default:
8435 BLOGE(sc, "Unsupported classification command: %d\n",
8436 elem->message.data.eth_event.echo);
8437 return;
8438 }
8439
8440 rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8441
8442 if (rc < 0) {
8443 BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8444 } else if (rc > 0) {
8445 BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8446 }
8447 }
8448
8449 static void
8450 bxe_handle_rx_mode_eqe(struct bxe_softc *sc,
8451 union event_ring_elem *elem)
8452 {
8453 bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8454
8455 /* send rx_mode command again if was requested */
8456 if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8457 &sc->sp_state)) {
8458 bxe_set_storm_rx_mode(sc);
8459 }
8460 }
8461
8462 static void
8463 bxe_update_eq_prod(struct bxe_softc *sc,
8464 uint16_t prod)
8465 {
8466 storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8467 wmb(); /* keep prod updates ordered */
8468 }
8469
8470 static void
8471 bxe_eq_int(struct bxe_softc *sc)
8472 {
8473 uint16_t hw_cons, sw_cons, sw_prod;
8474 union event_ring_elem *elem;
8475 uint8_t echo;
8476 uint32_t cid;
8477 uint8_t opcode;
8478 int spqe_cnt = 0;
8479 struct ecore_queue_sp_obj *q_obj;
8480 struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8481 struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8482
8483 hw_cons = le16toh(*sc->eq_cons_sb);
8484
8485 /*
8486 * The hw_cons range is 1-255, 257 while the sw_cons range is 0-254, 256.
8487 * When hw_cons lands on the next-page element we need to adjust it so the
8488 * loop condition below is met. The next-page element takes the space of a
8489 * regular element, hence the increment by 1.
8490 */
8491 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
8492 hw_cons++;
8493 }
8494
8495 /*
8496 * This function may never run in parallel with itself for a
8497 * specific sc and no need for a read memory barrier here.
8498 */
8499 sw_cons = sc->eq_cons;
8500 sw_prod = sc->eq_prod;
8501
8502 BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8503 hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8504
8505 for (;
8506 sw_cons != hw_cons;
8507 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8508
8509 elem = &sc->eq[EQ_DESC(sw_cons)];
8510
8511 /* elem CID originates from FW, actually LE */
8512 cid = SW_CID(elem->message.data.cfc_del_event.cid);
8513 opcode = elem->message.opcode;
8514
8515 /* handle eq element */
8516 switch (opcode) {
8517
8518 case EVENT_RING_OPCODE_STAT_QUERY:
8519 BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8520 sc->stats_comp++);
8521 /* nothing to do with stats comp */
8522 goto next_spqe;
8523
8524 case EVENT_RING_OPCODE_CFC_DEL:
8525 /* handle according to cid range */
8526 /* we may want to verify here that the sc state is HALTING */
8527 BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8528 q_obj = bxe_cid_to_q_obj(sc, cid);
8529 if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8530 break;
8531 }
8532 goto next_spqe;
8533
8534 case EVENT_RING_OPCODE_STOP_TRAFFIC:
8535 BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8536 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8537 break;
8538 }
8539 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8540 goto next_spqe;
8541
8542 case EVENT_RING_OPCODE_START_TRAFFIC:
8543 BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8544 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8545 break;
8546 }
8547 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8548 goto next_spqe;
8549
8550 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8551 echo = elem->message.data.function_update_event.echo;
8552 if (echo == SWITCH_UPDATE) {
8553 BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8554 if (f_obj->complete_cmd(sc, f_obj,
8555 ECORE_F_CMD_SWITCH_UPDATE)) {
8556 break;
8557 }
8558 }
8559 else {
8560 BLOGD(sc, DBG_SP,
8561 "AFEX: ramrod completed FUNCTION_UPDATE\n");
8562 }
8563 goto next_spqe;
8564
8565 case EVENT_RING_OPCODE_FORWARD_SETUP:
8566 q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8567 if (q_obj->complete_cmd(sc, q_obj,
8568 ECORE_Q_CMD_SETUP_TX_ONLY)) {
8569 break;
8570 }
8571 goto next_spqe;
8572
8573 case EVENT_RING_OPCODE_FUNCTION_START:
8574 BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8575 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8576 break;
8577 }
8578 goto next_spqe;
8579
8580 case EVENT_RING_OPCODE_FUNCTION_STOP:
8581 BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8582 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8583 break;
8584 }
8585 goto next_spqe;
8586 }
8587
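/*
 * The remaining opcodes are only valid in specific driver states, so
 * fold the current state into the switch value and match on the pair.
 */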
8588 switch (opcode | sc->state) {
8589 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8590 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8591 cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8592 BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8593 rss_raw->clear_pending(rss_raw);
8594 break;
8595
8596 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
8597 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
8598 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
8599 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
8600 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
8601 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8602 BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
8603 bxe_handle_classification_eqe(sc, elem);
8604 break;
8605
8606 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
8607 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
8608 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8609 BLOGD(sc, DBG_SP, "got mcast ramrod\n");
8610 bxe_handle_mcast_eqe(sc);
8611 break;
8612
8613 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
8614 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
8615 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8616 BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
8617 bxe_handle_rx_mode_eqe(sc, elem);
8618 break;
8619
8620 default:
8621 /* unknown event log error and continue */
8622 BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
8623 elem->message.opcode, sc->state);
8624 }
8625
8626 next_spqe:
8627 spqe_cnt++;
8628 } /* for */
8629
8630 mb();
8631 atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
8632
8633 sc->eq_cons = sw_cons;
8634 sc->eq_prod = sw_prod;
8635
8636 /* make sure that above mem writes were issued towards the memory */
8637 wmb();
8638
8639 /* update producer */
8640 bxe_update_eq_prod(sc, sc->eq_prod);
8641 }
8642
8643 static void
8644 bxe_handle_sp_tq(void *context,
8645 int pending)
8646 {
8647 struct bxe_softc *sc = (struct bxe_softc *)context;
8648 uint16_t status;
8649
8650 BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
8651
8652 /* what work needs to be performed? */
8653 status = bxe_update_dsb_idx(sc);
8654
8655 BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
8656
8657 /* HW attentions */
8658 if (status & BXE_DEF_SB_ATT_IDX) {
8659 BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
8660 bxe_attn_int(sc);
8661 status &= ~BXE_DEF_SB_ATT_IDX;
8662 }
8663
8664 /* SP events: STAT_QUERY and others */
8665 if (status & BXE_DEF_SB_IDX) {
8666 /* handle EQ completions */
8667 BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
8668 bxe_eq_int(sc);
8669 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
8670 le16toh(sc->def_idx), IGU_INT_NOP, 1);
8671 status &= ~BXE_DEF_SB_IDX;
8672 }
8673
8674 /* if status is non zero then something went wrong */
8675 if (__predict_false(status)) {
8676 BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
8677 }
8678
8679 /* ack status block only if something was actually handled */
8680 bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
8681 le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
8682
8683 /*
8684 * Must be called after the EQ processing (since eq leads to sriov
8685 * ramrod completion flows).
8686 * This flow may have been scheduled by the arrival of a ramrod
8687 * completion, or by the sriov code rescheduling itself.
8688 */
8689 // XXX bxe_iov_sp_task(sc);
8690
8691 }
8692
8693 static void
8694 bxe_handle_fp_tq(void *context,
8695 int pending)
8696 {
8697 struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
8698 struct bxe_softc *sc = fp->sc;
8699 uint8_t more_tx = FALSE;
8700 uint8_t more_rx = FALSE;
8701
8702 BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
8703
8704 /* XXX
8705 * IFF_DRV_RUNNING state can't be checked here since we process
8706 * slowpath events on a client queue during setup. Instead
8707 * we need to add a "process/continue" flag here that the driver
8708 * can use to tell the task here not to do anything.
8709 */
8710 #if 0
8711 if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
8712 return;
8713 }
8714 #endif
8715
8716 /* update the fastpath index */
8717 bxe_update_fp_sb_idx(fp);
8718
8719 /* XXX add loop here if ever support multiple tx CoS */
8720 /* fp->txdata[cos] */
8721 if (bxe_has_tx_work(fp)) {
8722 BXE_FP_TX_LOCK(fp);
8723 more_tx = bxe_txeof(sc, fp);
8724 BXE_FP_TX_UNLOCK(fp);
8725 }
8726
8727 if (bxe_has_rx_work(fp)) {
8728 more_rx = bxe_rxeof(sc, fp);
8729 }
8730
8731 if (more_rx /*|| more_tx*/) {
8732 /* still more work to do */
8733 taskqueue_enqueue(fp->tq, &fp->tq_task);
8734 return;
8735 }
8736
8737 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8738 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8739 }
8740
8741 static void
8742 bxe_task_fp(struct bxe_fastpath *fp)
8743 {
8744 struct bxe_softc *sc = fp->sc;
8745 uint8_t more_tx = FALSE;
8746 uint8_t more_rx = FALSE;
8747
8748 BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
8749
8750 /* update the fastpath index */
8751 bxe_update_fp_sb_idx(fp);
8752
8753 /* XXX add loop here if ever support multiple tx CoS */
8754 /* fp->txdata[cos] */
8755 if (bxe_has_tx_work(fp)) {
8756 BXE_FP_TX_LOCK(fp);
8757 more_tx = bxe_txeof(sc, fp);
8758 BXE_FP_TX_UNLOCK(fp);
8759 }
8760
8761 if (bxe_has_rx_work(fp)) {
8762 more_rx = bxe_rxeof(sc, fp);
8763 }
8764
8765 if (more_rx /*|| more_tx*/) {
8766 /* still more work to do, bail out of this ISR and process later */
8767 taskqueue_enqueue(fp->tq, &fp->tq_task);
8768 return;
8769 }
8770
8771 /*
8772 * Here we write the fastpath index taken before doing any tx or rx work.
8773 * It is very well possible other hw events occurred up to this point and
8774 * they were actually processed accordingly above. Since we're going to
8775 * write an older fastpath index, an interrupt is coming which we might
8776 * not do any work in.
8777 */
8778 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8779 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8780 }
8781
8782 /*
8783 * Legacy interrupt entry point.
8784 *
8785 * Verifies that the controller generated the interrupt and
8786 * then calls a separate routine to handle the various
8787 * interrupt causes: link, RX, and TX.
8788 */
8789 static void
8790 bxe_intr_legacy(void *xsc)
8791 {
8792 struct bxe_softc *sc = (struct bxe_softc *)xsc;
8793 struct bxe_fastpath *fp;
8794 uint16_t status, mask;
8795 int i;
8796
8797 BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
8798
8799 /*
8800 * 0 for ustorm, 1 for cstorm
8801 * the bits returned from ack_int() are 0-15
8802 * bit 0 = attention status block
8803 * bit 1 = fast path status block
8804 * a mask of 0x2 or more = tx/rx event
8805 * a mask of 1 = slow path event
8806 */
8807
8808 status = bxe_ack_int(sc);
8809
8810 /* the interrupt is not for us */
8811 if (__predict_false(status == 0)) {
8812 BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
8813 return;
8814 }
8815
8816 BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
8817
8818 FOR_EACH_ETH_QUEUE(sc, i) {
8819 fp = &sc->fp[i];
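/* bit 0 is the slowpath status block; each fastpath owns one bit starting
 * at bit 1, shifted up by one more when a CNIC status block is present */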
8820 mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
8821 if (status & mask) {
8822 /* acknowledge and disable further fastpath interrupts */
8823 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8824 bxe_task_fp(fp);
8825 status &= ~mask;
8826 }
8827 }
8828
8829 if (__predict_false(status & 0x1)) {
8830 /* acknowledge and disable further slowpath interrupts */
8831 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8832
8833 /* schedule slowpath handler */
8834 taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8835
8836 status &= ~0x1;
8837 }
8838
8839 if (__predict_false(status)) {
8840 BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
8841 }
8842 }
8843
8844 /* slowpath interrupt entry point */
8845 static void
8846 bxe_intr_sp(void *xsc)
8847 {
8848 struct bxe_softc *sc = (struct bxe_softc *)xsc;
8849
8850 BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
8851
8852 /* acknowledge and disable further slowpath interrupts */
8853 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8854
8855 /* schedule slowpath handler */
8856 taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8857 }
8858
8859 /* fastpath interrupt entry point */
8860 static void
8861 bxe_intr_fp(void *xfp)
8862 {
8863 struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
8864 struct bxe_softc *sc = fp->sc;
8865
8866 BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
8867
8868 BLOGD(sc, DBG_INTR,
8869 "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
8870 curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
8871
8872 /* acknowledge and disable further fastpath interrupts */
8873 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8874
8875 bxe_task_fp(fp);
8876 }
8877
8878 /* Release all interrupts allocated by the driver. */
8879 static void
8880 bxe_interrupt_free(struct bxe_softc *sc)
8881 {
8882 int i;
8883
8884 switch (sc->interrupt_mode) {
8885 case INTR_MODE_INTX:
8886 BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
8887 if (sc->intr[0].resource != NULL) {
8888 bus_release_resource(sc->dev,
8889 SYS_RES_IRQ,
8890 sc->intr[0].rid,
8891 sc->intr[0].resource);
8892 }
8893 break;
8894 case INTR_MODE_MSI:
8895 for (i = 0; i < sc->intr_count; i++) {
8896 BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
8897 if (sc->intr[i].resource && sc->intr[i].rid) {
8898 bus_release_resource(sc->dev,
8899 SYS_RES_IRQ,
8900 sc->intr[i].rid,
8901 sc->intr[i].resource);
8902 }
8903 }
8904 pci_release_msi(sc->dev);
8905 break;
8906 case INTR_MODE_MSIX:
8907 for (i = 0; i < sc->intr_count; i++) {
8908 BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
8909 if (sc->intr[i].resource && sc->intr[i].rid) {
8910 bus_release_resource(sc->dev,
8911 SYS_RES_IRQ,
8912 sc->intr[i].rid,
8913 sc->intr[i].resource);
8914 }
8915 }
8916 pci_release_msi(sc->dev);
8917 break;
8918 default:
8919 /* nothing to do as initial allocation failed */
8920 break;
8921 }
8922 }
8923
8924 /*
8925 * This function determines and allocates the appropriate
8926 * interrupt based on system capabilities and user request.
8927 *
8928 * The user may force a particular interrupt mode, specify
8929 * the number of receive queues, specify the method for
8930 * distributing received frames to receive queues, or use
8931 * the default settings which will automatically select the
8932 * best supported combination. In addition, the OS may or
8933 * may not support certain combinations of these settings.
8934 * This routine attempts to reconcile the settings requested
8935 * by the user with the capabilities available from the system
8936 * to select the optimal combination of features.
8937 *
8938 * Returns:
8939 * 0 = Success, !0 = Failure.
8940 */
8941 static int
8942 bxe_interrupt_alloc(struct bxe_softc *sc)
8943 {
8944 int msix_count = 0;
8945 int msi_count = 0;
8946 int num_requested = 0;
8947 int num_allocated = 0;
8948 int rid, i, j;
8949 int rc;
8950
8951 /* get the number of available MSI/MSI-X interrupts from the OS */
8952 if (sc->interrupt_mode > 0) {
8953 if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
8954 msix_count = pci_msix_count(sc->dev);
8955 }
8956
8957 if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
8958 msi_count = pci_msi_count(sc->dev);
8959 }
8960
8961 BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
8962 msi_count, msix_count);
8963 }
8964
8965 do { /* try allocating MSI-X interrupt resources (at least 2) */
8966 if (sc->interrupt_mode != INTR_MODE_MSIX) {
8967 break;
8968 }
8969
8970 if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
8971 (msix_count < 2)) {
8972 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8973 break;
8974 }
8975
8976 /* ask for the necessary number of MSI-X vectors */
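/* one vector for the slowpath plus one per fastpath queue, capped by
 * what the device exposes */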
8977 num_requested = min((sc->num_queues + 1), msix_count);
8978
8979 BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
8980
8981 num_allocated = num_requested;
8982 if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
8983 BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
8984 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8985 break;
8986 }
8987
8988 if (num_allocated < 2) { /* possible? */
8989 BLOGE(sc, "MSI-X allocation less than 2!\n");
8990 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8991 pci_release_msi(sc->dev);
8992 break;
8993 }
8994
8995 BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
8996 num_requested, num_allocated);
8997
8998 /* best effort so use the number of vectors allocated to us */
8999 sc->intr_count = num_allocated;
9000 sc->num_queues = num_allocated - 1;
9001
9002 rid = 1; /* initial resource identifier */
9003
9004 /* allocate the MSI-X vectors */
9005 for (i = 0; i < num_allocated; i++) {
9006 sc->intr[i].rid = (rid + i);
9007
9008 if ((sc->intr[i].resource =
9009 bus_alloc_resource_any(sc->dev,
9010 SYS_RES_IRQ,
9011 &sc->intr[i].rid,
9012 RF_ACTIVE)) == NULL) {
9013 BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
9014 i, (rid + i));
9015
9016 for (j = (i - 1); j >= 0; j--) {
9017 bus_release_resource(sc->dev,
9018 SYS_RES_IRQ,
9019 sc->intr[j].rid,
9020 sc->intr[j].resource);
9021 }
9022
9023 sc->intr_count = 0;
9024 sc->num_queues = 0;
9025 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9026 pci_release_msi(sc->dev);
9027 break;
9028 }
9029
9030 BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
9031 }
9032 } while (0);
9033
9034 do { /* try allocating MSI vector resources (at least 2) */
9035 if (sc->interrupt_mode != INTR_MODE_MSI) {
9036 break;
9037 }
9038
9039 if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9040 (msi_count < 1)) {
9041 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9042 break;
9043 }
9044
9045 /* ask for a single MSI vector */
9046 num_requested = 1;
9047
9048 BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
9049
9050 num_allocated = num_requested;
9051 if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9052 BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
9053 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9054 break;
9055 }
9056
9057 if (num_allocated != 1) { /* possible? */
9058 BLOGE(sc, "MSI allocation is not 1!\n");
9059 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9060 pci_release_msi(sc->dev);
9061 break;
9062 }
9063
9064 BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
9065 num_requested, num_allocated);
9066
9067 /* best effort so use the number of vectors allocated to us */
9068 sc->intr_count = num_allocated;
9069 sc->num_queues = num_allocated;
9070
9071 rid = 1; /* initial resource identifier */
9072
9073 sc->intr[0].rid = rid;
9074
9075 if ((sc->intr[0].resource =
9076 bus_alloc_resource_any(sc->dev,
9077 SYS_RES_IRQ,
9078 &sc->intr[0].rid,
9079 RF_ACTIVE)) == NULL) {
9080 BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
9081 sc->intr_count = 0;
9082 sc->num_queues = 0;
9083 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9084 pci_release_msi(sc->dev);
9085 break;
9086 }
9087
9088 BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
9089 } while (0);
9090
9091 do { /* try allocating INTx vector resources */
9092 if (sc->interrupt_mode != INTR_MODE_INTX) {
9093 break;
9094 }
9095
9096 BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
9097
9098 /* only one vector for INTx */
9099 sc->intr_count = 1;
9100 sc->num_queues = 1;
9101
9102 rid = 0; /* initial resource identifier */
9103
9104 sc->intr[0].rid = rid;
9105
9106 if ((sc->intr[0].resource =
9107 bus_alloc_resource_any(sc->dev,
9108 SYS_RES_IRQ,
9109 &sc->intr[0].rid,
9110 (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9111 BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9112 sc->intr_count = 0;
9113 sc->num_queues = 0;
9114 sc->interrupt_mode = -1; /* Failed! */
9115 break;
9116 }
9117
9118 BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9119 } while (0);
9120
9121 if (sc->interrupt_mode == -1) {
9122 BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9123 rc = 1;
9124 } else {
9125 BLOGD(sc, DBG_LOAD,
9126 "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9127 sc->interrupt_mode, sc->num_queues);
9128 rc = 0;
9129 }
9130
9131 return (rc);
9132 }
9133
9134 static void
9135 bxe_interrupt_detach(struct bxe_softc *sc)
9136 {
9137 struct bxe_fastpath *fp;
9138 int i;
9139
9140 /* release interrupt resources */
9141 for (i = 0; i < sc->intr_count; i++) {
9142 if (sc->intr[i].resource && sc->intr[i].tag) {
9143 BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9144 bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9145 }
9146 }
9147
9148 for (i = 0; i < sc->num_queues; i++) {
9149 fp = &sc->fp[i];
9150 if (fp->tq) {
9151 taskqueue_drain(fp->tq, &fp->tq_task);
9152 taskqueue_drain(fp->tq, &fp->tx_task);
9153 while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
9154 NULL))
9155 taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
9156 }
9157 }
9158
9159 for (i = 0; i < sc->num_queues; i++) {
9160 fp = &sc->fp[i];
9161 if (fp->tq != NULL) {
9162 taskqueue_free(fp->tq);
9163 fp->tq = NULL;
9164 }
9165 }
9166
9167 if (sc->sp_tq) {
9168 taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9169 taskqueue_free(sc->sp_tq);
9170 sc->sp_tq = NULL;
9171 }
9172 }
9173
9174 /*
9175 * Enables interrupts and attaches the ISRs.
9176 *
9177 * When using multiple MSI/MSI-X vectors the first vector
9178 * is used for slowpath operations while all remaining
9179 * vectors are used for fastpath operations. If only a
9180 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9181 * ISR must look for both slowpath and fastpath completions.
9182 */
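/*
 * Illustrative mapping (derived from the setup code below): with N MSI-X
 * vectors allocated, intr[0] is wired to bxe_intr_sp(sc) for the slowpath
 * and intr[1..N-1] to bxe_intr_fp(&sc->fp[i]) for fastpath queues 0..N-2,
 * i.e. num_queues == intr_count - 1 in the multi-vector case.
 */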
9183 static int
9184 bxe_interrupt_attach(struct bxe_softc *sc)
9185 {
9186 struct bxe_fastpath *fp;
9187 int rc = 0;
9188 int i;
9189
9190 snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9191 "bxe%d_sp_tq", sc->unit);
9192 TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9193 sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
9194 taskqueue_thread_enqueue,
9195 &sc->sp_tq);
9196 taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9197 "%s", sc->sp_tq_name);
9198
9199
9200 for (i = 0; i < sc->num_queues; i++) {
9201 fp = &sc->fp[i];
9202 snprintf(fp->tq_name, sizeof(fp->tq_name),
9203 "bxe%d_fp%d_tq", sc->unit, i);
9204 NET_TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9205 TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
9206 fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
9207 taskqueue_thread_enqueue,
9208 &fp->tq);
9209 TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
9210 bxe_tx_mq_start_deferred, fp);
9211 taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9212 "%s", fp->tq_name);
9213 }
9214
9215 /* setup interrupt handlers */
9216 if (sc->interrupt_mode == INTR_MODE_MSIX) {
9217 BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9218
9219 /*
9220 * Setup the interrupt handler. Note that we pass the driver instance
9221 * to the interrupt handler for the slowpath.
9222 */
9223 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9224 (INTR_TYPE_NET | INTR_MPSAFE),
9225 NULL, bxe_intr_sp, sc,
9226 &sc->intr[0].tag)) != 0) {
9227 BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9228 goto bxe_interrupt_attach_exit;
9229 }
9230
9231 bus_describe_intr(sc->dev, sc->intr[0].resource,
9232 sc->intr[0].tag, "sp");
9233
9234 /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9235
9236 /* initialize the fastpath vectors (note the first was used for sp) */
9237 for (i = 0; i < sc->num_queues; i++) {
9238 fp = &sc->fp[i];
9239 BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9240
9241 /*
9242 * Setup the interrupt handler. Note that we pass the
9243 * fastpath context to the interrupt handler in this
9244 * case.
9245 */
9246 if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9247 (INTR_TYPE_NET | INTR_MPSAFE),
9248 NULL, bxe_intr_fp, fp,
9249 &sc->intr[i + 1].tag)) != 0) {
9250 BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9251 (i + 1), rc);
9252 goto bxe_interrupt_attach_exit;
9253 }
9254
9255 bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9256 sc->intr[i + 1].tag, "fp%02d", i);
9257
9258 /* bind the fastpath instance to a cpu */
9259 if (sc->num_queues > 1) {
9260 bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9261 }
9262
9263 fp->state = BXE_FP_STATE_IRQ;
9264 }
9265 } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9266 BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");
9267
9268 /*
9269 * Setup the interrupt handler. Note that we pass the
9270 * driver instance to the interrupt handler which
9271 * will handle both the slowpath and fastpath.
9272 */
9273 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9274 (INTR_TYPE_NET | INTR_MPSAFE),
9275 NULL, bxe_intr_legacy, sc,
9276 &sc->intr[0].tag)) != 0) {
9277 BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9278 goto bxe_interrupt_attach_exit;
9279 }
9280
9281 } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9282 BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9283
9284 /*
9285 * Setup the interrupt handler. Note that we pass the
9286 * driver instance to the interrupt handler which
9287 * will handle both the slowpath and fastpath.
9288 */
9289 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9290 (INTR_TYPE_NET | INTR_MPSAFE),
9291 NULL, bxe_intr_legacy, sc,
9292 &sc->intr[0].tag)) != 0) {
9293 BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9294 goto bxe_interrupt_attach_exit;
9295 }
9296 }
9297
9298 bxe_interrupt_attach_exit:
9299
9300 return (rc);
9301 }
9302
9303 static int bxe_init_hw_common_chip(struct bxe_softc *sc);
9304 static int bxe_init_hw_common(struct bxe_softc *sc);
9305 static int bxe_init_hw_port(struct bxe_softc *sc);
9306 static int bxe_init_hw_func(struct bxe_softc *sc);
9307 static void bxe_reset_common(struct bxe_softc *sc);
9308 static void bxe_reset_port(struct bxe_softc *sc);
9309 static void bxe_reset_func(struct bxe_softc *sc);
9310 static int bxe_gunzip_init(struct bxe_softc *sc);
9311 static void bxe_gunzip_end(struct bxe_softc *sc);
9312 static int bxe_init_firmware(struct bxe_softc *sc);
9313 static void bxe_release_firmware(struct bxe_softc *sc);
9314
9315 static struct
9316 ecore_func_sp_drv_ops bxe_func_sp_drv = {
9317 .init_hw_cmn_chip = bxe_init_hw_common_chip,
9318 .init_hw_cmn = bxe_init_hw_common,
9319 .init_hw_port = bxe_init_hw_port,
9320 .init_hw_func = bxe_init_hw_func,
9321
9322 .reset_hw_cmn = bxe_reset_common,
9323 .reset_hw_port = bxe_reset_port,
9324 .reset_hw_func = bxe_reset_func,
9325
9326 .gunzip_init = bxe_gunzip_init,
9327 .gunzip_end = bxe_gunzip_end,
9328
9329 .init_fw = bxe_init_firmware,
9330 .release_fw = bxe_release_firmware,
9331 };
9332
9333 static void
9334 bxe_init_func_obj(struct bxe_softc *sc)
9335 {
9336 sc->dmae_ready = 0;
9337
9338 ecore_init_func_obj(sc,
9339 &sc->func_obj,
9340 BXE_SP(sc, func_rdata),
9341 BXE_SP_MAPPING(sc, func_rdata),
9342 BXE_SP(sc, func_afex_rdata),
9343 BXE_SP_MAPPING(sc, func_afex_rdata),
9344 &bxe_func_sp_drv);
9345 }
9346
9347 static int
9348 bxe_init_hw(struct bxe_softc *sc,
9349 uint32_t load_code)
9350 {
9351 struct ecore_func_state_params func_params = { NULL };
9352 int rc;
9353
9354 /* prepare the parameters for function state transitions */
9355 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9356
9357 func_params.f_obj = &sc->func_obj;
9358 func_params.cmd = ECORE_F_CMD_HW_INIT;
9359
9360 func_params.params.hw_init.load_phase = load_code;
9361
9362 /*
9363 * Via a plethora of function pointers, we will eventually reach
9364 * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9365 */
9366 rc = ecore_func_state_change(sc, &func_params);
9367
9368 return (rc);
9369 }
9370
9371 static void
9372 bxe_fill(struct bxe_softc *sc,
9373 uint32_t addr,
9374 int fill,
9375 uint32_t len)
9376 {
9377 uint32_t i;
9378
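    /* use 32-bit register writes when both addr and len are dword aligned */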
9379 if (!(len % 4) && !(addr % 4)) {
9380 for (i = 0; i < len; i += 4) {
9381 REG_WR(sc, (addr + i), fill);
9382 }
9383 } else {
9384 for (i = 0; i < len; i++) {
9385 REG_WR8(sc, (addr + i), fill);
9386 }
9387 }
9388 }
9389
9390 /* writes FP SP data to FW - data_size in dwords */
9391 static void
9392 bxe_wr_fp_sb_data(struct bxe_softc *sc,
9393 int fw_sb_id,
9394 uint32_t *sb_data_p,
9395 uint32_t data_size)
9396 {
9397 int index;
9398
9399 for (index = 0; index < data_size; index++) {
9400 REG_WR(sc,
9401 (BAR_CSTRORM_INTMEM +
9402 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9403 (sizeof(uint32_t) * index)),
9404 *(sb_data_p + index));
9405 }
9406 }
9407
9408 static void
9409 bxe_zero_fp_sb(struct bxe_softc *sc,
9410 int fw_sb_id)
9411 {
9412 struct hc_status_block_data_e2 sb_data_e2;
9413 struct hc_status_block_data_e1x sb_data_e1x;
9414 uint32_t *sb_data_p;
9415 uint32_t data_size = 0;
9416
9417 if (!CHIP_IS_E1x(sc)) {
9418 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9419 sb_data_e2.common.state = SB_DISABLED;
9420 sb_data_e2.common.p_func.vf_valid = FALSE;
9421 sb_data_p = (uint32_t *)&sb_data_e2;
9422 data_size = (sizeof(struct hc_status_block_data_e2) /
9423 sizeof(uint32_t));
9424 } else {
9425 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9426 sb_data_e1x.common.state = SB_DISABLED;
9427 sb_data_e1x.common.p_func.vf_valid = FALSE;
9428 sb_data_p = (uint32_t *)&sb_data_e1x;
9429 data_size = (sizeof(struct hc_status_block_data_e1x) /
9430 sizeof(uint32_t));
9431 }
9432
9433 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9434
9435 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9436 0, CSTORM_STATUS_BLOCK_SIZE);
9437 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9438 0, CSTORM_SYNC_BLOCK_SIZE);
9439 }
9440
9441 static void
9442 bxe_wr_sp_sb_data(struct bxe_softc *sc,
9443 struct hc_sp_status_block_data *sp_sb_data)
9444 {
9445 int i;
9446
9447 for (i = 0;
9448 i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9449 i++) {
9450 REG_WR(sc,
9451 (BAR_CSTRORM_INTMEM +
9452 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9453 (i * sizeof(uint32_t))),
9454 *((uint32_t *)sp_sb_data + i));
9455 }
9456 }
9457
9458 static void
9459 bxe_zero_sp_sb(struct bxe_softc *sc)
9460 {
9461 struct hc_sp_status_block_data sp_sb_data;
9462
9463 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9464
9465 sp_sb_data.state = SB_DISABLED;
9466 sp_sb_data.p_func.vf_valid = FALSE;
9467
9468 bxe_wr_sp_sb_data(sc, &sp_sb_data);
9469
9470 bxe_fill(sc,
9471 (BAR_CSTRORM_INTMEM +
9472 CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9473 0, CSTORM_SP_STATUS_BLOCK_SIZE);
9474 bxe_fill(sc,
9475 (BAR_CSTRORM_INTMEM +
9476 CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9477 0, CSTORM_SP_SYNC_BLOCK_SIZE);
9478 }
9479
9480 static void
9481 bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9482 int igu_sb_id,
9483 int igu_seg_id)
9484 {
9485 hc_sm->igu_sb_id = igu_sb_id;
9486 hc_sm->igu_seg_id = igu_seg_id;
9487 hc_sm->timer_value = 0xFF;
9488 hc_sm->time_to_expire = 0xFFFFFFFF;
9489 }
9490
9491 static void
9492 bxe_map_sb_state_machines(struct hc_index_data *index_data)
9493 {
9494 /* zero out state machine indices */
9495
9496 /* rx indices */
9497 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9498
9499 /* tx indices */
9500 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9501 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9502 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9503 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9504
9505 /* map indices */
9506
9507 /* rx indices */
9508 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9509 (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9510
9511 /* tx indices */
9512 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9513 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9514 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9515 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9516 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9517 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9518 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9519 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9520 }
9521
9522 static void
9523 bxe_init_sb(struct bxe_softc *sc,
9524 bus_addr_t busaddr,
9525 int vfid,
9526 uint8_t vf_valid,
9527 int fw_sb_id,
9528 int igu_sb_id)
9529 {
9530 struct hc_status_block_data_e2 sb_data_e2;
9531 struct hc_status_block_data_e1x sb_data_e1x;
9532 struct hc_status_block_sm *hc_sm_p;
9533 uint32_t *sb_data_p;
9534 int igu_seg_id;
9535 int data_size;
9536
9537 if (CHIP_INT_MODE_IS_BC(sc)) {
9538 igu_seg_id = HC_SEG_ACCESS_NORM;
9539 } else {
9540 igu_seg_id = IGU_SEG_ACCESS_NORM;
9541 }
9542
9543 bxe_zero_fp_sb(sc, fw_sb_id);
9544
9545 if (!CHIP_IS_E1x(sc)) {
9546 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9547 sb_data_e2.common.state = SB_ENABLED;
9548 sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9549 sb_data_e2.common.p_func.vf_id = vfid;
9550 sb_data_e2.common.p_func.vf_valid = vf_valid;
9551 sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9552 sb_data_e2.common.same_igu_sb_1b = TRUE;
9553 sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9554 sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9555 hc_sm_p = sb_data_e2.common.state_machine;
9556 sb_data_p = (uint32_t *)&sb_data_e2;
9557 data_size = (sizeof(struct hc_status_block_data_e2) /
9558 sizeof(uint32_t));
9559 bxe_map_sb_state_machines(sb_data_e2.index_data);
9560 } else {
9561 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9562 sb_data_e1x.common.state = SB_ENABLED;
9563 sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9564 sb_data_e1x.common.p_func.vf_id = 0xff;
9565 sb_data_e1x.common.p_func.vf_valid = FALSE;
9566 sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9567 sb_data_e1x.common.same_igu_sb_1b = TRUE;
9568 sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9569 sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9570 hc_sm_p = sb_data_e1x.common.state_machine;
9571 sb_data_p = (uint32_t *)&sb_data_e1x;
9572 data_size = (sizeof(struct hc_status_block_data_e1x) /
9573 sizeof(uint32_t));
9574 bxe_map_sb_state_machines(sb_data_e1x.index_data);
9575 }
9576
9577 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9578 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9579
9580 BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9581
9582 /* write indices to HW - PCI guarantees endianness of regpairs */
9583 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9584 }
9585
9586 static inline uint8_t
9587 bxe_fp_qzone_id(struct bxe_fastpath *fp)
9588 {
9589 if (CHIP_IS_E1x(fp->sc)) {
9590 return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9591 } else {
9592 return (fp->cl_id);
9593 }
9594 }
9595
9596 static inline uint32_t
9597 bxe_rx_ustorm_prods_offset(struct bxe_softc *sc,
9598 struct bxe_fastpath *fp)
9599 {
9600 uint32_t offset = BAR_USTRORM_INTMEM;
9601
9602 if (!CHIP_IS_E1x(sc)) {
9603 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
9604 } else {
9605 offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
9606 }
9607
9608 return (offset);
9609 }
9610
9611 static void
9612 bxe_init_eth_fp(struct bxe_softc *sc,
9613 int idx)
9614 {
9615 struct bxe_fastpath *fp = &sc->fp[idx];
9616 uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
9617 unsigned long q_type = 0;
9618 int cos;
9619
9620 fp->sc = sc;
9621 fp->index = idx;
9622
9623 fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
9624 fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
9625
9626 fp->cl_id = (CHIP_IS_E1x(sc)) ?
9627 (SC_L_ID(sc) + idx) :
9628 /* want client ID same as IGU SB ID for non-E1 */
9629 fp->igu_sb_id;
9630 fp->cl_qzone_id = bxe_fp_qzone_id(fp);
9631
9632 /* setup sb indices */
9633 if (!CHIP_IS_E1x(sc)) {
9634 fp->sb_index_values = fp->status_block.e2_sb->sb.index_values;
9635 fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
9636 } else {
9637 fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values;
9638 fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
9639 }
9640
9641 /* init shortcut */
9642 fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
9643
9644 fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
9645
9646 /*
9647 * XXX If multiple CoS is ever supported then each fastpath structure
9648 * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
9649 */
9650 for (cos = 0; cos < sc->max_cos; cos++) {
9651 cids[cos] = idx;
9652 }
9653 fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
9654
9655 /* nothing more for a VF to do */
9656 if (IS_VF(sc)) {
9657 return;
9658 }
9659
9660 bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
9661 fp->fw_sb_id, fp->igu_sb_id);
9662
9663 bxe_update_fp_sb_idx(fp);
9664
9665 /* Configure Queue State object */
9666 bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
9667 bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
9668
9669 ecore_init_queue_obj(sc,
9670 &sc->sp_objs[idx].q_obj,
9671 fp->cl_id,
9672 cids,
9673 sc->max_cos,
9674 SC_FUNC(sc),
9675 BXE_SP(sc, q_rdata),
9676 BXE_SP_MAPPING(sc, q_rdata),
9677 q_type);
9678
9679 /* configure classification DBs */
9680 ecore_init_mac_obj(sc,
9681 &sc->sp_objs[idx].mac_obj,
9682 fp->cl_id,
9683 idx,
9684 SC_FUNC(sc),
9685 BXE_SP(sc, mac_rdata),
9686 BXE_SP_MAPPING(sc, mac_rdata),
9687 ECORE_FILTER_MAC_PENDING,
9688 &sc->sp_state,
9689 ECORE_OBJ_TYPE_RX_TX,
9690 &sc->macs_pool);
9691
9692 BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
9693 idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
9694 }
9695
9696 static inline void
9697 bxe_update_rx_prod(struct bxe_softc *sc,
9698 struct bxe_fastpath *fp,
9699 uint16_t rx_bd_prod,
9700 uint16_t rx_cq_prod,
9701 uint16_t rx_sge_prod)
9702 {
9703 struct ustorm_eth_rx_producers rx_prods = { 0 };
9704 uint32_t i;
9705
9706 /* update producers */
9707 rx_prods.bd_prod = rx_bd_prod;
9708 rx_prods.cqe_prod = rx_cq_prod;
9709 rx_prods.sge_prod = rx_sge_prod;
9710
9711 /*
9712 * Make sure that the BD and SGE data is updated before updating the
9713 * producers since FW might read the BD/SGE right after the producer
9714 * is updated.
9715 * This is only applicable for weak-ordered memory model archs such
9716 * as IA-64. The following barrier is also mandatory since the FW
9717 * assumes BDs must have buffers.
9718 */
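    /*
     * Ordering as implemented below: fill BDs/SGEs -> wmb() -> write the
     * bd/cqe/sge producers one dword at a time -> wmb() to keep producer
     * updates ordered.
     */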
9719 wmb();
9720
9721 for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
9722 REG_WR(sc,
9723 (fp->ustorm_rx_prods_offset + (i * 4)),
9724 ((uint32_t *)&rx_prods)[i]);
9725 }
9726
9727 wmb(); /* keep prod updates ordered */
9728
9729 BLOGD(sc, DBG_RX,
9730 "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
9731 fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
9732 }
9733
9734 static void
9735 bxe_init_rx_rings(struct bxe_softc *sc)
9736 {
9737 struct bxe_fastpath *fp;
9738 int i;
9739
9740 for (i = 0; i < sc->num_queues; i++) {
9741 fp = &sc->fp[i];
9742
9743 fp->rx_bd_cons = 0;
9744
9745 /*
9746 * Activate the BD ring...
9747 * Warning, this will generate an interrupt (to the TSTORM)
9748 * so this can only be done after the chip is initialized
9749 */
9750 bxe_update_rx_prod(sc, fp,
9751 fp->rx_bd_prod,
9752 fp->rx_cq_prod,
9753 fp->rx_sge_prod);
9754
9755 if (i != 0) {
9756 continue;
9757 }
9758
9759 if (CHIP_IS_E1(sc)) {
9760 REG_WR(sc,
9761 (BAR_USTRORM_INTMEM +
9762 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
9763 U64_LO(fp->rcq_dma.paddr));
9764 REG_WR(sc,
9765 (BAR_USTRORM_INTMEM +
9766 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
9767 U64_HI(fp->rcq_dma.paddr));
9768 }
9769 }
9770 }
9771
9772 static void
9773 bxe_init_tx_ring_one(struct bxe_fastpath *fp)
9774 {
9775 SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
9776 fp->tx_db.data.zero_fill1 = 0;
9777 fp->tx_db.data.prod = 0;
9778
9779 fp->tx_pkt_prod = 0;
9780 fp->tx_pkt_cons = 0;
9781 fp->tx_bd_prod = 0;
9782 fp->tx_bd_cons = 0;
9783 fp->eth_q_stats.tx_pkts = 0;
9784 }
9785
9786 static inline void
9787 bxe_init_tx_rings(struct bxe_softc *sc)
9788 {
9789 int i;
9790
9791 for (i = 0; i < sc->num_queues; i++) {
9792 bxe_init_tx_ring_one(&sc->fp[i]);
9793 }
9794 }
9795
9796 static void
9797 bxe_init_def_sb(struct bxe_softc *sc)
9798 {
9799 struct host_sp_status_block *def_sb = sc->def_sb;
9800 bus_addr_t mapping = sc->def_sb_dma.paddr;
9801 int igu_sp_sb_index;
9802 int igu_seg_id;
9803 int port = SC_PORT(sc);
9804 int func = SC_FUNC(sc);
9805 int reg_offset, reg_offset_en5;
9806 uint64_t section;
9807 int index, sindex;
9808 struct hc_sp_status_block_data sp_sb_data;
9809
9810 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9811
9812 if (CHIP_INT_MODE_IS_BC(sc)) {
9813 igu_sp_sb_index = DEF_SB_IGU_ID;
9814 igu_seg_id = HC_SEG_ACCESS_DEF;
9815 } else {
9816 igu_sp_sb_index = sc->igu_dsb_id;
9817 igu_seg_id = IGU_SEG_ACCESS_DEF;
9818 }
9819
9820 /* attentions */
9821 section = ((uint64_t)mapping +
9822 offsetof(struct host_sp_status_block, atten_status_block));
9823 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
9824 sc->attn_state = 0;
9825
9826 reg_offset = (port) ?
9827 MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
9828 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
9829 reg_offset_en5 = (port) ?
9830 MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
9831 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
9832
9833 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
9834 /* take care of sig[0]..sig[4] */
9835 for (sindex = 0; sindex < 4; sindex++) {
9836 sc->attn_group[index].sig[sindex] =
9837 REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
9838 }
9839
9840 if (!CHIP_IS_E1x(sc)) {
9841 /*
9842 * enable5 is separate from the rest of the registers,
9843 * and the address skip is 4 and not 16 between the
9844 * different groups
9845 */
9846 sc->attn_group[index].sig[4] =
9847 REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
9848 } else {
9849 sc->attn_group[index].sig[4] = 0;
9850 }
9851 }
9852
9853 if (sc->devinfo.int_block == INT_BLOCK_HC) {
9854 reg_offset = (port) ?
9855 HC_REG_ATTN_MSG1_ADDR_L :
9856 HC_REG_ATTN_MSG0_ADDR_L;
9857 REG_WR(sc, reg_offset, U64_LO(section));
9858 REG_WR(sc, (reg_offset + 4), U64_HI(section));
9859 } else if (!CHIP_IS_E1x(sc)) {
9860 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
9861 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
9862 }
9863
9864 section = ((uint64_t)mapping +
9865 offsetof(struct host_sp_status_block, sp_sb));
9866
9867 bxe_zero_sp_sb(sc);
9868
9869 /* PCI guarantees endianness of regpairs */
9870 sp_sb_data.state = SB_ENABLED;
9871 sp_sb_data.host_sb_addr.lo = U64_LO(section);
9872 sp_sb_data.host_sb_addr.hi = U64_HI(section);
9873 sp_sb_data.igu_sb_id = igu_sp_sb_index;
9874 sp_sb_data.igu_seg_id = igu_seg_id;
9875 sp_sb_data.p_func.pf_id = func;
9876 sp_sb_data.p_func.vnic_id = SC_VN(sc);
9877 sp_sb_data.p_func.vf_id = 0xff;
9878
9879 bxe_wr_sp_sb_data(sc, &sp_sb_data);
9880
9881 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
9882 }
9883
9884 static void
9885 bxe_init_sp_ring(struct bxe_softc *sc)
9886 {
9887 atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
9888 sc->spq_prod_idx = 0;
9889 sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
9890 sc->spq_prod_bd = sc->spq;
9891 sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
9892 }
9893
9894 static void
9895 bxe_init_eq_ring(struct bxe_softc *sc)
9896 {
9897 union event_ring_elem *elem;
9898 int i;
9899
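    /*
     * Chain the EQ pages into a ring: the last element of each page holds a
     * next-page pointer to the following page, wrapping back to page 0.
     */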
9900 for (i = 1; i <= NUM_EQ_PAGES; i++) {
9901 elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
9902
9903 elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
9904 BCM_PAGE_SIZE *
9905 (i % NUM_EQ_PAGES)));
9906 elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
9907 BCM_PAGE_SIZE *
9908 (i % NUM_EQ_PAGES)));
9909 }
9910
9911 sc->eq_cons = 0;
9912 sc->eq_prod = NUM_EQ_DESC;
9913 sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
9914
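    /*
     * Descriptive note: the initial EQ credit is bounded by both the SPQ room
     * left after MAX_SPQ_PENDING reserved entries and the number of EQ
     * descriptors, minus one.
     */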
9915 atomic_store_rel_long(&sc->eq_spq_left,
9916 (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
9917 NUM_EQ_DESC) - 1));
9918 }
9919
9920 static void
9921 bxe_init_internal_common(struct bxe_softc *sc)
9922 {
9923 int i;
9924
9925 /*
9926 * Zero this manually as its initialization is currently missing
9927 * in the initTool.
9928 */
9929 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
9930 REG_WR(sc,
9931 (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
9932 0);
9933 }
9934
9935 if (!CHIP_IS_E1x(sc)) {
9936 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
9937 CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
9938 }
9939 }
9940
9941 static void
9942 bxe_init_internal(struct bxe_softc *sc,
9943 uint32_t load_code)
9944 {
9945 switch (load_code) {
9946 case FW_MSG_CODE_DRV_LOAD_COMMON:
9947 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
9948 bxe_init_internal_common(sc);
9949 /* no break */
9950
9951 case FW_MSG_CODE_DRV_LOAD_PORT:
9952 /* nothing to do */
9953 /* no break */
9954
9955 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
9956 /* internal memory per function is initialized inside bxe_pf_init */
9957 break;
9958
9959 default:
9960 BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
9961 break;
9962 }
9963 }
9964
9965 static void
9966 storm_memset_func_cfg(struct bxe_softc *sc,
9967 struct tstorm_eth_function_common_config *tcfg,
9968 uint16_t abs_fid)
9969 {
9970 uint32_t addr;
9971 size_t size;
9972
9973 addr = (BAR_TSTRORM_INTMEM +
9974 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
9975 size = sizeof(struct tstorm_eth_function_common_config);
9976 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
9977 }
9978
9979 static void
9980 bxe_func_init(struct bxe_softc *sc,
9981 struct bxe_func_init_params *p)
9982 {
9983 struct tstorm_eth_function_common_config tcfg = { 0 };
9984
9985 if (CHIP_IS_E1x(sc)) {
9986 storm_memset_func_cfg(sc, &tcfg, p->func_id);
9987 }
9988
9989 /* Enable the function in the FW */
9990 storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
9991 storm_memset_func_en(sc, p->func_id, 1);
9992
9993 /* spq */
9994 if (p->func_flgs & FUNC_FLG_SPQ) {
9995 storm_memset_spq_addr(sc, p->spq_map, p->func_id);
9996 REG_WR(sc,
9997 (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
9998 p->spq_prod);
9999 }
10000 }
10001
10002 /*
10003 * Computes the per-vNIC min rates used by the fairness algorithm and
10004 * stores them in the cmng init input (input->vnic_min_rate[]); they are
10005 * normalized later by the common cmng init code.
10006 * Hidden vNICs get a min rate of 0, and a configured min rate of 0 is
10007 * bumped to DEF_MIN_RATE so the vNIC still gets a non-zero weight.
10008 * Fairness (CMNG_FLAGS_PER_PORT_FAIRNESS_VN) is disabled when ETS is
10009 * enabled or when all of the configured min rates are zero; otherwise
10010 * it is enabled.
10011 */
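/*
 * Worked example (illustrative, assuming DEF_MIN_RATE is 100 as the in-line
 * comment below suggests, and that ETS is disabled): MIN BW fields of
 * {10, 0, 20, 0} yield vnic_min_rate[] = {1000, 100, 2000, 100} with fairness
 * left enabled, while {0, 0, 0, 0} yields all-DEF_MIN_RATE entries and
 * fairness is disabled.
 */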
10012 static void
10013 bxe_calc_vn_min(struct bxe_softc *sc,
10014 struct cmng_init_input *input)
10015 {
10016 uint32_t vn_cfg;
10017 uint32_t vn_min_rate;
10018 int all_zero = 1;
10019 int vn;
10020
10021 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10022 vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10023 vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
10024 FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
10025
10026 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10027 /* skip hidden VNs */
10028 vn_min_rate = 0;
10029 } else if (!vn_min_rate) {
10030 /* If min rate is zero - set it to 100 */
10031 vn_min_rate = DEF_MIN_RATE;
10032 } else {
10033 all_zero = 0;
10034 }
10035
10036 input->vnic_min_rate[vn] = vn_min_rate;
10037 }
10038
10039 /* if ETS or all min rates are zeros - disable fairness */
10040 if (BXE_IS_ETS_ENABLED(sc)) {
10041 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10042 BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
10043 } else if (all_zero) {
10044 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10045 BLOGD(sc, DBG_LOAD,
10046 "Fariness disabled (all MIN values are zeroes)\n");
10047 } else {
10048 input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10049 }
10050 }
10051
10052 static inline uint16_t
10053 bxe_extract_max_cfg(struct bxe_softc *sc,
10054 uint32_t mf_cfg)
10055 {
10056 uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
10057 FUNC_MF_CFG_MAX_BW_SHIFT);
10058
10059 if (!max_cfg) {
10060 BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10061 max_cfg = 100;
10062 }
10063
10064 return (max_cfg);
10065 }
10066
10067 static void
10068 bxe_calc_vn_max(struct bxe_softc *sc,
10069 int vn,
10070 struct cmng_init_input *input)
10071 {
10072 uint16_t vn_max_rate;
10073 uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10074 uint32_t max_cfg;
10075
10076 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10077 vn_max_rate = 0;
10078 } else {
10079 max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10080
10081 if (IS_MF_SI(sc)) {
10082 /* max_cfg is a percentage of the link speed */
10083 vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10084 } else { /* SD modes */
10085 /* max_cfg is absolute in 100Mb units */
10086 vn_max_rate = (max_cfg * 100);
10087 }
10088 }
10089
10090 BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10091
10092 input->vnic_max_rate[vn] = vn_max_rate;
10093 }
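/*
 * Worked example (illustrative): with a 10000 Mbps link and max_cfg of 30,
 * MF-SI mode yields vn_max_rate = (10000 * 30) / 100 = 3000, while the SD
 * modes treat max_cfg as absolute 100 Mb units, i.e. 30 * 100 = 3000 as well
 * (the two only coincide at 10G link speed).
 */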
10094
10095 static void
10096 bxe_cmng_fns_init(struct bxe_softc *sc,
10097 uint8_t read_cfg,
10098 uint8_t cmng_type)
10099 {
10100 struct cmng_init_input input;
10101 int vn;
10102
10103 memset(&input, 0, sizeof(struct cmng_init_input));
10104
10105 input.port_rate = sc->link_vars.line_speed;
10106
10107 if (cmng_type == CMNG_FNS_MINMAX) {
10108 /* read mf conf from shmem */
10109 if (read_cfg) {
10110 bxe_read_mf_cfg(sc);
10111 }
10112
10113 /* get VN min rate and enable fairness if not 0 */
10114 bxe_calc_vn_min(sc, &input);
10115
10116 /* get VN max rate */
10117 if (sc->port.pmf) {
10118 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10119 bxe_calc_vn_max(sc, vn, &input);
10120 }
10121 }
10122
10123 /* always enable rate shaping and fairness */
10124 input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10125
10126 ecore_init_cmng(&input, &sc->cmng);
10127 return;
10128 }
10129
10130 /* rate shaping and fairness are disabled */
10131 BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10132 }
10133
10134 static int
10135 bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10136 {
10137 if (CHIP_REV_IS_SLOW(sc)) {
10138 return (CMNG_FNS_NONE);
10139 }
10140
10141 if (IS_MF(sc)) {
10142 return (CMNG_FNS_MINMAX);
10143 }
10144
10145 return (CMNG_FNS_NONE);
10146 }
10147
10148 static void
10149 storm_memset_cmng(struct bxe_softc *sc,
10150 struct cmng_init *cmng,
10151 uint8_t port)
10152 {
10153 int vn;
10154 int func;
10155 uint32_t addr;
10156 size_t size;
10157
10158 addr = (BAR_XSTRORM_INTMEM +
10159 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10160 size = sizeof(struct cmng_struct_per_port);
10161 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10162
10163 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10164 func = func_by_vn(sc, vn);
10165
10166 addr = (BAR_XSTRORM_INTMEM +
10167 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10168 size = sizeof(struct rate_shaping_vars_per_vn);
10169 ecore_storm_memset_struct(sc, addr, size,
10170 (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10171
10172 addr = (BAR_XSTRORM_INTMEM +
10173 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10174 size = sizeof(struct fairness_vars_per_vn);
10175 ecore_storm_memset_struct(sc, addr, size,
10176 (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10177 }
10178 }
10179
10180 static void
10181 bxe_pf_init(struct bxe_softc *sc)
10182 {
10183 struct bxe_func_init_params func_init = { 0 };
10184 struct event_ring_data eq_data = { { 0 } };
10185 uint16_t flags;
10186
10187 if (!CHIP_IS_E1x(sc)) {
10188 /* reset IGU PF statistics: MSIX + ATTN */
10189 /* PF */
10190 REG_WR(sc,
10191 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10192 (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10193 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10194 0);
10195 /* ATTN */
10196 REG_WR(sc,
10197 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10198 (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10199 (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10200 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10201 0);
10202 }
10203
10204 /* function setup flags */
10205 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10206
10207 /*
10208 * This flag is relevant for E1x only.
10209 * E2 doesn't have a TPA configuration at the function level.
10210 */
10211 flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10212
10213 func_init.func_flgs = flags;
10214 func_init.pf_id = SC_FUNC(sc);
10215 func_init.func_id = SC_FUNC(sc);
10216 func_init.spq_map = sc->spq_dma.paddr;
10217 func_init.spq_prod = sc->spq_prod_idx;
10218
10219 bxe_func_init(sc, &func_init);
10220
10221 memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10222
10223 /*
10224 * Congestion management values depend on the link rate.
10225 * There is no active link so initial link rate is set to 10Gbps.
10226 * When the link comes up the congestion management values are
10227 * re-calculated according to the actual link rate.
10228 */
10229 sc->link_vars.line_speed = SPEED_10000;
10230 bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10231
10232 /* Only the PMF sets the HW */
10233 if (sc->port.pmf) {
10234 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10235 }
10236
10237 /* init the Event Queue - the PCI bus guarantees correct endianness */
10238 eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10239 eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10240 eq_data.producer = sc->eq_prod;
10241 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
10242 eq_data.sb_id = DEF_SB_ID;
10243 storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10244 }
10245
10246 static void
10247 bxe_hc_int_enable(struct bxe_softc *sc)
10248 {
10249 int port = SC_PORT(sc);
10250 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10251 uint32_t val = REG_RD(sc, addr);
10252 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10253 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10254 (sc->intr_count == 1)) ? TRUE : FALSE;
10255 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10256
10257 if (msix) {
10258 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10259 HC_CONFIG_0_REG_INT_LINE_EN_0);
10260 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10261 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10262 if (single_msix) {
10263 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10264 }
10265 } else if (msi) {
10266 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10267 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10268 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10269 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10270 } else {
10271 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10272 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10273 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10274 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10275
10276 if (!CHIP_IS_E1(sc)) {
10277 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10278 val, port, addr);
10279
10280 REG_WR(sc, addr, val);
10281
10282 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10283 }
10284 }
10285
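    /* E1 only: unmask all HC interrupt sources (the disable path writes 0 here) */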
10286 if (CHIP_IS_E1(sc)) {
10287 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10288 }
10289
10290 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10291 val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10292
10293 REG_WR(sc, addr, val);
10294
10295 /* ensure that HC_CONFIG is written before leading/trailing edge config */
10296 mb();
10297
10298 if (!CHIP_IS_E1(sc)) {
10299 /* init leading/trailing edge */
10300 if (IS_MF(sc)) {
10301 val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10302 if (sc->port.pmf) {
10303 /* enable nig and gpio3 attention */
10304 val |= 0x1100;
10305 }
10306 } else {
10307 val = 0xffff;
10308 }
10309
10310 REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10311 REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10312 }
10313
10314 /* make sure that interrupts are indeed enabled from here on */
10315 mb();
10316 }
10317
10318 static void
10319 bxe_igu_int_enable(struct bxe_softc *sc)
10320 {
10321 uint32_t val;
10322 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10323 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10324 (sc->intr_count == 1)) ? TRUE : FALSE;
10325 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10326
10327 val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10328
10329 if (msix) {
10330 val &= ~(IGU_PF_CONF_INT_LINE_EN |
10331 IGU_PF_CONF_SINGLE_ISR_EN);
10332 val |= (IGU_PF_CONF_MSI_MSIX_EN |
10333 IGU_PF_CONF_ATTN_BIT_EN);
10334 if (single_msix) {
10335 val |= IGU_PF_CONF_SINGLE_ISR_EN;
10336 }
10337 } else if (msi) {
10338 val &= ~IGU_PF_CONF_INT_LINE_EN;
10339 val |= (IGU_PF_CONF_MSI_MSIX_EN |
10340 IGU_PF_CONF_ATTN_BIT_EN |
10341 IGU_PF_CONF_SINGLE_ISR_EN);
10342 } else {
10343 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10344 val |= (IGU_PF_CONF_INT_LINE_EN |
10345 IGU_PF_CONF_ATTN_BIT_EN |
10346 IGU_PF_CONF_SINGLE_ISR_EN);
10347 }
10348
10349 /* clean previous status - need to configure the IGU prior to ack */
10350 if ((!msix) || single_msix) {
10351 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10352 bxe_ack_int(sc);
10353 }
10354
10355 val |= IGU_PF_CONF_FUNC_EN;
10356
10357 BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10358 val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10359
10360 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10361
10362 mb();
10363
10364 /* init leading/trailing edge */
10365 if (IS_MF(sc)) {
10366 val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10367 if (sc->port.pmf) {
10368 /* enable nig and gpio3 attention */
10369 val |= 0x1100;
10370 }
10371 } else {
10372 val = 0xffff;
10373 }
10374
10375 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10376 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10377
10378 /* make sure that interrupts are indeed enabled from here on */
10379 mb();
10380 }
10381
10382 static void
10383 bxe_int_enable(struct bxe_softc *sc)
10384 {
10385 if (sc->devinfo.int_block == INT_BLOCK_HC) {
10386 bxe_hc_int_enable(sc);
10387 } else {
10388 bxe_igu_int_enable(sc);
10389 }
10390 }
10391
10392 static void
10393 bxe_hc_int_disable(struct bxe_softc *sc)
10394 {
10395 int port = SC_PORT(sc);
10396 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10397 uint32_t val = REG_RD(sc, addr);
10398
10399 /*
10400 * In E1 we must use only PCI configuration space to disable MSI/MSIX
10401 * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the
10402 * HC block.
10403 */
10404 if (CHIP_IS_E1(sc)) {
10405 /*
10406 * Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask register
10407 * to prevent the HC from sending interrupts after we exit this function
10408 */
10409 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10410
10411 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10412 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10413 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10414 } else {
10415 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10416 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10417 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10418 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10419 }
10420
10421 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10422
10423 /* flush all outstanding writes */
10424 mb();
10425
10426 REG_WR(sc, addr, val);
10427 if (REG_RD(sc, addr) != val) {
10428 BLOGE(sc, "proper val not read from HC IGU!\n");
10429 }
10430 }
10431
10432 static void
10433 bxe_igu_int_disable(struct bxe_softc *sc)
10434 {
10435 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10436
10437 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10438 IGU_PF_CONF_INT_LINE_EN |
10439 IGU_PF_CONF_ATTN_BIT_EN);
10440
10441 BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10442
10443 /* flush all outstanding writes */
10444 mb();
10445
10446 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10447 if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10448 BLOGE(sc, "proper val not read from IGU!\n");
10449 }
10450 }
10451
10452 static void
10453 bxe_int_disable(struct bxe_softc *sc)
10454 {
10455 if (sc->devinfo.int_block == INT_BLOCK_HC) {
10456 bxe_hc_int_disable(sc);
10457 } else {
10458 bxe_igu_int_disable(sc);
10459 }
10460 }
10461
10462 static void
10463 bxe_nic_init(struct bxe_softc *sc,
10464 int load_code)
10465 {
10466 int i;
10467
10468 for (i = 0; i < sc->num_queues; i++) {
10469 bxe_init_eth_fp(sc, i);
10470 }
10471
10472 rmb(); /* ensure status block indices were read */
10473
10474 bxe_init_rx_rings(sc);
10475 bxe_init_tx_rings(sc);
10476
10477 if (IS_VF(sc)) {
10478 return;
10479 }
10480
10481 /* initialize MOD_ABS interrupts */
10482 elink_init_mod_abs_int(sc, &sc->link_vars,
10483 sc->devinfo.chip_id,
10484 sc->devinfo.shmem_base,
10485 sc->devinfo.shmem2_base,
10486 SC_PORT(sc));
10487
10488 bxe_init_def_sb(sc);
10489 bxe_update_dsb_idx(sc);
10490 bxe_init_sp_ring(sc);
10491 bxe_init_eq_ring(sc);
10492 bxe_init_internal(sc, load_code);
10493 bxe_pf_init(sc);
10494 bxe_stats_init(sc);
10495
10496 /* flush all before enabling interrupts */
10497 mb();
10498
10499 bxe_int_enable(sc);
10500
10501 /* check for SPIO5 */
10502 bxe_attn_int_deasserted0(sc,
10503 REG_RD(sc,
10504 (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10505 SC_PORT(sc)*4)) &
10506 AEU_INPUTS_ATTN_BITS_SPIO5);
10507 }
10508
10509 static inline void
10510 bxe_init_objs(struct bxe_softc *sc)
10511 {
10512 /* mcast rules must be added to tx if tx switching is enabled */
10513 ecore_obj_type o_type =
10514 (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10515 ECORE_OBJ_TYPE_RX;
10516
10517 /* RX_MODE controlling object */
10518 ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10519
10520 /* multicast configuration controlling object */
10521 ecore_init_mcast_obj(sc,
10522 &sc->mcast_obj,
10523 sc->fp[0].cl_id,
10524 sc->fp[0].index,
10525 SC_FUNC(sc),
10526 SC_FUNC(sc),
10527 BXE_SP(sc, mcast_rdata),
10528 BXE_SP_MAPPING(sc, mcast_rdata),
10529 ECORE_FILTER_MCAST_PENDING,
10530 &sc->sp_state,
10531 o_type);
10532
10533 /* Setup CAM credit pools */
10534 ecore_init_mac_credit_pool(sc,
10535 &sc->macs_pool,
10536 SC_FUNC(sc),
10537 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10538 VNICS_PER_PATH(sc));
10539
10540 ecore_init_vlan_credit_pool(sc,
10541 &sc->vlans_pool,
10542 SC_ABS_FUNC(sc) >> 1,
10543 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10544 VNICS_PER_PATH(sc));
10545
10546 /* RSS configuration object */
10547 ecore_init_rss_config_obj(sc,
10548 &sc->rss_conf_obj,
10549 sc->fp[0].cl_id,
10550 sc->fp[0].index,
10551 SC_FUNC(sc),
10552 SC_FUNC(sc),
10553 BXE_SP(sc, rss_rdata),
10554 BXE_SP_MAPPING(sc, rss_rdata),
10555 ECORE_FILTER_RSS_CONF_PENDING,
10556 &sc->sp_state, ECORE_OBJ_TYPE_RX);
10557 }
10558
10559 /*
10560 * Initialize the function. This must be called before sending CLIENT_SETUP
10561 * for the first client.
10562 */
10563 static inline int
10564 bxe_func_start(struct bxe_softc *sc)
10565 {
10566 struct ecore_func_state_params func_params = { NULL };
10567 struct ecore_func_start_params *start_params = &func_params.params.start;
10568
10569 /* Prepare parameters for function state transitions */
10570 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
10571
10572 func_params.f_obj = &sc->func_obj;
10573 func_params.cmd = ECORE_F_CMD_START;
10574
10575 /* Function parameters */
10576 start_params->mf_mode = sc->devinfo.mf_info.mf_mode;
10577 start_params->sd_vlan_tag = OVLAN(sc);
10578
10579 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
10580 start_params->network_cos_mode = STATIC_COS;
10581 } else { /* CHIP_IS_E1X */
10582 start_params->network_cos_mode = FW_WRR;
10583 }
10584
10585 //start_params->gre_tunnel_mode = 0;
10586 //start_params->gre_tunnel_rss = 0;
10587
10588 return (ecore_func_state_change(sc, &func_params));
10589 }
10590
10591 static int
10592 bxe_set_power_state(struct bxe_softc *sc,
10593 uint8_t state)
10594 {
10595 uint16_t pmcsr;
10596
10597 /* If there is no power capability, warn and succeed */
10598 if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
10599 BLOGW(sc, "No power capability\n");
10600 return (0);
10601 }
10602
10603 pmcsr = pci_read_config(sc->dev,
10604 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10605 2);
10606
10607 switch (state) {
10608 case PCI_PM_D0:
10609 pci_write_config(sc->dev,
10610 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10611 ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
10612
10613 if (pmcsr & PCIM_PSTAT_DMASK) {
10614 /* delay required during transition out of D3hot */
10615 DELAY(20000);
10616 }
10617
10618 break;
10619
10620 case PCI_PM_D3hot:
10621 /* XXX if there are other clients above don't shut down the power */
10622
10623 /* don't shut down the power for emulation and FPGA */
10624 if (CHIP_REV_IS_SLOW(sc)) {
10625 return (0);
10626 }
10627
10628 pmcsr &= ~PCIM_PSTAT_DMASK;
10629 pmcsr |= PCIM_PSTAT_D3;
10630
10631 if (sc->wol) {
10632 pmcsr |= PCIM_PSTAT_PMEENABLE;
10633 }
10634
10635 pci_write_config(sc->dev,
10636 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10637 pmcsr, 4);
10638
10639 /*
10640 * No more memory access after this point until device is brought back
10641 * to D0 state.
10642 */
10643 break;
10644
10645 default:
10646 BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
10647 state, pmcsr);
10648 return (-1);
10649 }
10650
10651 return (0);
10652 }
10653
10654
10655 /* return true if succeeded to acquire the lock */
10656 static uint8_t
10657 bxe_trylock_hw_lock(struct bxe_softc *sc,
10658 uint32_t resource)
10659 {
10660 uint32_t lock_status;
10661 uint32_t resource_bit = (1 << resource);
10662 int func = SC_FUNC(sc);
10663 uint32_t hw_lock_control_reg;
10664
10665 BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
10666
10667 /* Validating that the resource is within range */
10668 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
10669 BLOGD(sc, DBG_LOAD,
10670 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
10671 resource, HW_LOCK_MAX_RESOURCE_VALUE);
10672 return (FALSE);
10673 }
10674
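    /*
     * Each function has its own driver-control register pair, 8 bytes apart:
     * functions 0-5 index off MISC_REG_DRIVER_CONTROL_1 and functions 6-7 off
     * MISC_REG_DRIVER_CONTROL_7 (the acquire write below goes to offset +4).
     */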
10675 if (func <= 5) {
10676 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
10677 } else {
10678 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
10679 }
10680
10681 /* try to acquire the lock */
10682 REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
10683 lock_status = REG_RD(sc, hw_lock_control_reg);
10684 if (lock_status & resource_bit) {
10685 return (TRUE);
10686 }
10687
10688 BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
10689 "lock_status 0x%x resource_bit 0x%x\n", resource, func,
10690 lock_status, resource_bit);
10691
10692 return (FALSE);
10693 }
10694
10695 /*
10696 * Get the recovery leader resource id according to the engine this function
10697 * belongs to. Currently only 2 engines are supported.
10698 */
10699 static int
10700 bxe_get_leader_lock_resource(struct bxe_softc *sc)
10701 {
10702 if (SC_PATH(sc)) {
10703 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
10704 } else {
10705 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
10706 }
10707 }
10708
10709 /* try to acquire a leader lock for current engine */
10710 static uint8_t
10711 bxe_trylock_leader_lock(struct bxe_softc *sc)
10712 {
10713 return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10714 }
10715
10716 static int
10717 bxe_release_leader_lock(struct bxe_softc *sc)
10718 {
10719 return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10720 }
10721
10722 /* close gates #2, #3 and #4 */
10723 static void
10724 bxe_set_234_gates(struct bxe_softc *sc,
10725 uint8_t close)
10726 {
10727 uint32_t val;
10728
10729 /* gates #2 and #4a are closed/opened for "not E1" only */
10730 if (!CHIP_IS_E1(sc)) {
10731 /* #4 */
10732 REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
10733 /* #2 */
10734 REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
10735 }
10736
10737 /* #3 */
10738 if (CHIP_IS_E1x(sc)) {
10739 /* prevent interrupts from HC on both ports */
10740 val = REG_RD(sc, HC_REG_CONFIG_1);
10741 REG_WR(sc, HC_REG_CONFIG_1,
10742 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
10743 (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
10744
10745 val = REG_RD(sc, HC_REG_CONFIG_0);
10746 REG_WR(sc, HC_REG_CONFIG_0,
10747 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
10748 (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
10749 } else {
10750 /* Prevent incoming interrupts in IGU */
10751 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
10752
10753 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
10754 (!close) ?
10755 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
10756 (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
10757 }
10758
10759 BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
10760 close ? "closing" : "opening");
10761
10762 wmb();
10763 }
10764
10765 /* poll for the pending writes bit; it should clear within 1s (1000 x 1ms polls) */
10766 static int
10767 bxe_er_poll_igu_vq(struct bxe_softc *sc)
10768 {
10769 uint32_t cnt = 1000;
10770 uint32_t pend_bits = 0;
10771
10772 do {
10773 pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
10774
10775 if (pend_bits == 0) {
10776 break;
10777 }
10778
10779 DELAY(1000);
10780 } while (--cnt > 0);
10781
10782 if (cnt == 0) {
10783 BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
10784 return (-1);
10785 }
10786
10787 return (0);
10788 }
10789
10790 #define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */
10791
10792 static void
10793 bxe_clp_reset_prep(struct bxe_softc *sc,
10794 uint32_t *magic_val)
10795 {
10796 /* Do some magic... */
10797 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10798 *magic_val = val & SHARED_MF_CLP_MAGIC;
10799 MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
10800 }
10801
10802 /* restore the value of the 'magic' bit */
10803 static void
10804 bxe_clp_reset_done(struct bxe_softc *sc,
10805 uint32_t magic_val)
10806 {
10807 /* Restore the 'magic' bit value... */
10808 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10809 MFCFG_WR(sc, shared_mf_config.clp_mb,
10810 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
10811 }
10812
10813 /* prepare for MCP reset, takes care of CLP configurations */
10814 static void
10815 bxe_reset_mcp_prep(struct bxe_softc *sc,
10816 uint32_t *magic_val)
10817 {
10818 uint32_t shmem;
10819 uint32_t validity_offset;
10820
10821 /* set `magic' bit in order to save MF config */
10822 if (!CHIP_IS_E1(sc)) {
10823 bxe_clp_reset_prep(sc, magic_val);
10824 }
10825
10826 /* get shmem offset */
10827 shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10828 validity_offset =
10829 offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
10830
10831 /* Clear validity map flags */
10832 if (shmem > 0) {
10833 REG_WR(sc, shmem + validity_offset, 0);
10834 }
10835 }
10836
10837 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
10838 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
10839
10840 static void
10841 bxe_mcp_wait_one(struct bxe_softc *sc)
10842 {
10843 /* special handling for emulation and FPGA (10 times longer) */
10844 if (CHIP_REV_IS_SLOW(sc)) {
10845 DELAY((MCP_ONE_TIMEOUT*10) * 1000);
10846 } else {
10847 DELAY((MCP_ONE_TIMEOUT) * 1000);
10848 }
10849 }
10850
10851 /* initialize shmem_base and wait for the validity signature to appear */
10852 static int
10853 bxe_init_shmem(struct bxe_softc *sc)
10854 {
10855 int cnt = 0;
10856 uint32_t val = 0;
10857
10858 do {
10859 sc->devinfo.shmem_base =
10860 sc->link_params.shmem_base =
10861 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10862
10863 if (sc->devinfo.shmem_base) {
10864 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
10865 if (val & SHR_MEM_VALIDITY_MB)
10866 return (0);
10867 }
10868
10869 bxe_mcp_wait_one(sc);
10870
10871 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
10872
10873 BLOGE(sc, "BAD MCP validity signature\n");
10874
10875 return (-1);
10876 }
10877
10878 static int
10879 bxe_reset_mcp_comp(struct bxe_softc *sc,
10880 uint32_t magic_val)
10881 {
10882 int rc = bxe_init_shmem(sc);
10883
10884 /* Restore the `magic' bit value */
10885 if (!CHIP_IS_E1(sc)) {
10886 bxe_clp_reset_done(sc, magic_val);
10887 }
10888
10889 return (rc);
10890 }
10891
10892 static void
10893 bxe_pxp_prep(struct bxe_softc *sc)
10894 {
10895 if (!CHIP_IS_E1(sc)) {
10896 REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
10897 REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
10898 wmb();
10899 }
10900 }
10901
10902 /*
10903 * Reset the whole chip except for:
10904 * - PCIE core
10905 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10906 * - IGU
10907 * - MISC (including AEU)
10908 * - GRC
10909 * - RBCN, RBCP
10910 */
10911 static void
10912 bxe_process_kill_chip_reset(struct bxe_softc *sc,
10913 uint8_t global)
10914 {
10915 uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
10916 uint32_t global_bits2, stay_reset2;
10917
10918 /*
10919 * Bits that have to be set in reset_mask2 if we want to reset 'global'
10920 * (per chip) blocks.
10921 */
10922 global_bits2 =
10923 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
10924 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
10925
10926 /*
10927 * Don't reset the following blocks.
10928 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
10929 * reset, as in a 4-port device they might still be owned
10930 * by the MCP (there is only one leader per path).
10931 */
10932 not_reset_mask1 =
10933 MISC_REGISTERS_RESET_REG_1_RST_HC |
10934 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
10935 MISC_REGISTERS_RESET_REG_1_RST_PXP;
10936
10937 not_reset_mask2 =
10938 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
10939 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
10940 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
10941 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
10942 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
10943 MISC_REGISTERS_RESET_REG_2_RST_GRC |
10944 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
10945 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
10946 MISC_REGISTERS_RESET_REG_2_RST_ATC |
10947 MISC_REGISTERS_RESET_REG_2_PGLC |
10948 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
10949 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
10950 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
10951 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
10952 MISC_REGISTERS_RESET_REG_2_UMAC0 |
10953 MISC_REGISTERS_RESET_REG_2_UMAC1;
10954
10955 /*
10956 * Keep the following blocks in reset:
10957 * - all xxMACs are handled by the elink code.
10958 */
10959 stay_reset2 =
10960 MISC_REGISTERS_RESET_REG_2_XMAC |
10961 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
10962
10963 /* Full reset masks according to the chip */
10964 reset_mask1 = 0xffffffff;
10965
10966 if (CHIP_IS_E1(sc))
10967 reset_mask2 = 0xffff;
10968 else if (CHIP_IS_E1H(sc))
10969 reset_mask2 = 0x1ffff;
10970 else if (CHIP_IS_E2(sc))
10971 reset_mask2 = 0xfffff;
10972 else /* CHIP_IS_E3 */
10973 reset_mask2 = 0x3ffffff;
10974
10975 /* Don't reset global blocks unless we need to */
10976 if (!global)
10977 reset_mask2 &= ~global_bits2;
10978
10979 /*
10980 * In case of attention in the QM, we need to reset PXP
10981 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
10982 * because otherwise QM reset would release 'close the gates' shortly
10983 * before resetting the PXP, then the PSWRQ would send a write
10984 * request to PGLUE. Then when PXP is reset, PGLUE would try to
10985 * read the payload data from PSWWR, but PSWWR would not
10986  * respond. The write queue in PGLUE would get stuck and DMAE commands
10987 * would not return. Therefore it's important to reset the second
10988 * reset register (containing the
10989 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
10990 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
10991 * bit).
10992 */
10993 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
10994 reset_mask2 & (~not_reset_mask2));
10995
10996 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
10997 reset_mask1 & (~not_reset_mask1));
10998
10999 mb();
11000 wmb();
11001
11002 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
11003 reset_mask2 & (~stay_reset2));
11004
11005 mb();
11006 wmb();
11007
11008 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
11009 wmb();
11010 }
11011
11012 static int
11013 bxe_process_kill(struct bxe_softc *sc,
11014 uint8_t global)
11015 {
11016 int cnt = 1000;
11017 uint32_t val = 0;
11018 uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
11019 uint32_t tags_63_32 = 0;
11020
11021 /* Empty the Tetris buffer, wait for 1s */
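    /* (up to 1000 polls of the PXP2/PGLUE counters, 1 ms apart) */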
11022 do {
11023 sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT);
11024 blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
11025 port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
11026 port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
11027 pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
11028 if (CHIP_IS_E3(sc)) {
11029 tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
11030 }
11031
11032 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
11033 ((port_is_idle_0 & 0x1) == 0x1) &&
11034 ((port_is_idle_1 & 0x1) == 0x1) &&
11035 (pgl_exp_rom2 == 0xffffffff) &&
11036 (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
11037 break;
11038 DELAY(1000);
11039 } while (cnt-- > 0);
11040
11041 if (cnt <= 0) {
11042 BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
11043 "are still outstanding read requests after 1s! "
11044 "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
11045 "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
11046 sr_cnt, blk_cnt, port_is_idle_0,
11047 port_is_idle_1, pgl_exp_rom2);
11048 return (-1);
11049 }
11050
11051 mb();
11052
11053 /* Close gates #2, #3 and #4 */
11054 bxe_set_234_gates(sc, TRUE);
11055
11056 /* Poll for IGU VQs for 57712 and newer chips */
11057 if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
11058 return (-1);
11059 }
11060
11061 /* XXX indicate that "process kill" is in progress to MCP */
11062
11063 /* clear "unprepared" bit */
11064 REG_WR(sc, MISC_REG_UNPREPARED, 0);
11065 mb();
11066
11067 /* Make sure all is written to the chip before the reset */
11068 wmb();
11069
11070 /*
11071 * Wait for 1ms to empty GLUE and PCI-E core queues,
11072 * PSWHST, GRC and PSWRD Tetris buffer.
11073 */
11074 DELAY(1000);
11075
11076 /* Prepare to chip reset: */
11077 /* MCP */
11078 if (global) {
11079 bxe_reset_mcp_prep(sc, &val);
11080 }
11081
11082 /* PXP */
11083 bxe_pxp_prep(sc);
11084 mb();
11085
11086 /* reset the chip */
11087 bxe_process_kill_chip_reset(sc, global);
11088 mb();
11089
11090 /* clear errors in PGB */
11091 if (!CHIP_IS_E1(sc))
11092 REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11093
11094 /* Recover after reset: */
11095 /* MCP */
11096 if (global && bxe_reset_mcp_comp(sc, val)) {
11097 return (-1);
11098 }
11099
11100 /* XXX add resetting the NO_MCP mode DB here */
11101
11102 /* Open the gates #2, #3 and #4 */
11103 bxe_set_234_gates(sc, FALSE);
11104
11105 /* XXX
11106      * IGU/AEU preparation: bring the AEU/IGU back to a reset state
11107      * and re-enable attentions
11108 */
11109
11110 return (0);
11111 }
11112
11113 static int
11114 bxe_leader_reset(struct bxe_softc *sc)
11115 {
11116 int rc = 0;
11117 uint8_t global = bxe_reset_is_global(sc);
11118 uint32_t load_code;
11119
11120 /*
11121 * If not going to reset MCP, load "fake" driver to reset HW while
11122 * driver is owner of the HW.
11123 */
11124 if (!global && !BXE_NOMCP(sc)) {
11125 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11126 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11127 if (!load_code) {
11128 BLOGE(sc, "MCP response failure, aborting\n");
11129 rc = -1;
11130 goto exit_leader_reset;
11131 }
11132
11133 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11134 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11135 BLOGE(sc, "MCP unexpected response, aborting\n");
11136 rc = -1;
11137 goto exit_leader_reset2;
11138 }
11139
11140 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11141 if (!load_code) {
11142 BLOGE(sc, "MCP response failure, aborting\n");
11143 rc = -1;
11144 goto exit_leader_reset2;
11145 }
11146 }
11147
11148 /* try to recover after the failure */
11149 if (bxe_process_kill(sc, global)) {
11150 BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11151 rc = -1;
11152 goto exit_leader_reset2;
11153 }
11154
11155 /*
11156 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11157 * state.
11158 */
11159 bxe_set_reset_done(sc);
11160 if (global) {
11161 bxe_clear_reset_global(sc);
11162 }
11163
11164 exit_leader_reset2:
11165
11166 /* unload "fake driver" if it was loaded */
11167 if (!global && !BXE_NOMCP(sc)) {
11168 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11169 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11170 }
11171
11172 exit_leader_reset:
11173
11174 sc->is_leader = 0;
11175 bxe_release_leader_lock(sc);
11176
11177 mb();
11178 return (rc);
11179 }
11180
11181 /*
11182 * prepare INIT transition, parameters configured:
11183 * - HC configuration
11184 * - Queue's CDU context
11185 */
11186 static void
11187 bxe_pf_q_prep_init(struct bxe_softc *sc,
11188 struct bxe_fastpath *fp,
11189 struct ecore_queue_init_params *init_params)
11190 {
11191 uint8_t cos;
11192 int cxt_index, cxt_offset;
11193
11194 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11195 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11196
11197 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11198 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11199
11200 /* HC rate */
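    /*
     * hc_rate = 1000000 / ticks: converts the coalescing tick period into
     * a per-second update rate; a tick value of 0 yields a rate of 0.
     */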
11201 init_params->rx.hc_rate =
11202 sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11203 init_params->tx.hc_rate =
11204 sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
11205
11206 /* FW SB ID */
11207 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11208
11209 /* CQ index among the SB indices */
11210 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11211 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11212
11213 /* set maximum number of COSs supported by this queue */
11214 init_params->max_cos = sc->max_cos;
11215
11216 BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11217 fp->index, init_params->max_cos);
11218
11219 /* set the context pointers queue object */
11220 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11221 /* XXX change index/cid here if ever support multiple tx CoS */
11222 /* fp->txdata[cos]->cid */
11223 cxt_index = fp->index / ILT_PAGE_CIDS;
11224 cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11225 init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
11226 }
11227 }
11228
11229 /* set flags that are common for the Tx-only and not normal connections */
11230 static unsigned long
11231 bxe_get_common_flags(struct bxe_softc *sc,
11232 struct bxe_fastpath *fp,
11233 uint8_t zero_stats)
11234 {
11235 unsigned long flags = 0;
11236
11237 /* PF driver will always initialize the Queue to an ACTIVE state */
11238 bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11239
11240 /*
11241 * tx only connections collect statistics (on the same index as the
11242 * parent connection). The statistics are zeroed when the parent
11243 * connection is initialized.
11244 */
11245
11246 bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11247 if (zero_stats) {
11248 bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11249 }
11250
11251 /*
11252 * tx only connections can support tx-switching, though their
11253 * CoS-ness doesn't survive the loopback
11254 */
11255 if (sc->flags & BXE_TX_SWITCHING) {
11256 bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11257 }
11258
11259 bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11260
11261 return (flags);
11262 }
11263
11264 static unsigned long
11265 bxe_get_q_flags(struct bxe_softc *sc,
11266 struct bxe_fastpath *fp,
11267 uint8_t leading)
11268 {
11269 unsigned long flags = 0;
11270
11271 if (IS_MF_SD(sc)) {
11272 bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11273 }
11274
11275 if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
11276 bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11277 bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11278 }
11279
11280 if (leading) {
11281 bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11282 bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11283 }
11284
11285 bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11286
11287 /* merge with common flags */
11288 return (flags | bxe_get_common_flags(sc, fp, TRUE));
11289 }
11290
11291 static void
11292 bxe_pf_q_prep_general(struct bxe_softc *sc,
11293 struct bxe_fastpath *fp,
11294 struct ecore_general_setup_params *gen_init,
11295 uint8_t cos)
11296 {
11297 gen_init->stat_id = bxe_stats_id(fp);
11298 gen_init->spcl_id = fp->cl_id;
11299 gen_init->mtu = sc->mtu;
11300 gen_init->cos = cos;
11301 }
11302
11303 static void
11304 bxe_pf_rx_q_prep(struct bxe_softc *sc,
11305 struct bxe_fastpath *fp,
11306 struct rxq_pause_params *pause,
11307 struct ecore_rxq_setup_params *rxq_init)
11308 {
11309 uint8_t max_sge = 0;
11310 uint16_t sge_sz = 0;
11311 uint16_t tpa_agg_size = 0;
11312
11313 pause->sge_th_lo = SGE_TH_LO(sc);
11314 pause->sge_th_hi = SGE_TH_HI(sc);
11315
11316 /* validate SGE ring has enough to cross high threshold */
11317 if (sc->dropless_fc &&
11318 (pause->sge_th_hi + FW_PREFETCH_CNT) >
11319 (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11320 BLOGW(sc, "sge ring threshold limit\n");
11321 }
11322
11323 /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11324 tpa_agg_size = (2 * sc->mtu);
11325 if (tpa_agg_size < sc->max_aggregation_size) {
11326 tpa_agg_size = sc->max_aggregation_size;
11327 }
11328
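    /*
     * Number of SGE entries needed to hold one MTU worth of data: the MTU
     * is rounded up to whole SGE pages, the page count is then rounded up
     * to a multiple of PAGES_PER_SGE and converted to SGE entries.
     */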
11329 max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11330 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11331 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11332 sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
11333
11334 /* pause - not for e1 */
11335 if (!CHIP_IS_E1(sc)) {
11336 pause->bd_th_lo = BD_TH_LO(sc);
11337 pause->bd_th_hi = BD_TH_HI(sc);
11338
11339 pause->rcq_th_lo = RCQ_TH_LO(sc);
11340 pause->rcq_th_hi = RCQ_TH_HI(sc);
11341
11342 /* validate rings have enough entries to cross high thresholds */
11343 if (sc->dropless_fc &&
11344 pause->bd_th_hi + FW_PREFETCH_CNT >
11345 sc->rx_ring_size) {
11346 BLOGW(sc, "rx bd ring threshold limit\n");
11347 }
11348
11349 if (sc->dropless_fc &&
11350 pause->rcq_th_hi + FW_PREFETCH_CNT >
11351 RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11352 BLOGW(sc, "rcq ring threshold limit\n");
11353 }
11354
11355 pause->pri_map = 1;
11356 }
11357
11358 /* rxq setup */
11359 rxq_init->dscr_map = fp->rx_dma.paddr;
11360 rxq_init->sge_map = fp->rx_sge_dma.paddr;
11361 rxq_init->rcq_map = fp->rcq_dma.paddr;
11362 rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11363
11364 /*
11365      * This should be the maximum number of data bytes that may be
11366      * placed on the BD (not including padding).
11367 */
11368 rxq_init->buf_sz = (fp->rx_buf_size -
11369 IP_HEADER_ALIGNMENT_PADDING);
11370
11371 rxq_init->cl_qzone_id = fp->cl_qzone_id;
11372 rxq_init->tpa_agg_sz = tpa_agg_size;
11373 rxq_init->sge_buf_sz = sge_sz;
11374 rxq_init->max_sges_pkt = max_sge;
11375 rxq_init->rss_engine_id = SC_FUNC(sc);
11376 rxq_init->mcast_engine_id = SC_FUNC(sc);
11377
11378 /*
11379      * Maximum number of simultaneous TPA aggregations for this Queue.
11380 * For PF Clients it should be the maximum available number.
11381 * VF driver(s) may want to define it to a smaller value.
11382 */
11383 rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11384
11385 rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11386 rxq_init->fw_sb_id = fp->fw_sb_id;
11387
11388 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11389
11390 /*
11391 * configure silent vlan removal
11392 * if multi function mode is afex, then mask default vlan
11393 */
11394 if (IS_MF_AFEX(sc)) {
11395 rxq_init->silent_removal_value =
11396 sc->devinfo.mf_info.afex_def_vlan_tag;
11397 rxq_init->silent_removal_mask = EVL_VLID_MASK;
11398 }
11399 }
11400
11401 static void
11402 bxe_pf_tx_q_prep(struct bxe_softc *sc,
11403 struct bxe_fastpath *fp,
11404 struct ecore_txq_setup_params *txq_init,
11405 uint8_t cos)
11406 {
11407 /*
11408 * XXX If multiple CoS is ever supported then each fastpath structure
11409 * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11410 * fp->txdata[cos]->tx_dma.paddr;
11411 */
11412 txq_init->dscr_map = fp->tx_dma.paddr;
11413 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11414 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11415 txq_init->fw_sb_id = fp->fw_sb_id;
11416
11417 /*
11418      * set the TSS leading client id for TX classification to the
11419 * leading RSS client id
11420 */
11421 txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11422 }
11423
11424 /*
11425 * This function performs 2 steps in a queue state machine:
11426 * 1) RESET->INIT
11427 * 2) INIT->SETUP
11428 */
11429 static int
11430 bxe_setup_queue(struct bxe_softc *sc,
11431 struct bxe_fastpath *fp,
11432 uint8_t leading)
11433 {
11434 struct ecore_queue_state_params q_params = { NULL };
11435 struct ecore_queue_setup_params *setup_params =
11436 &q_params.params.setup;
11437 int rc;
11438
11439 BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11440
11441 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11442
11443 q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11444
11445 /* we want to wait for completion in this context */
11446 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11447
11448 /* prepare the INIT parameters */
11449 bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11450
11451 /* Set the command */
11452 q_params.cmd = ECORE_Q_CMD_INIT;
11453
11454 /* Change the state to INIT */
11455 rc = ecore_queue_state_change(sc, &q_params);
11456 if (rc) {
11457 BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
11458 return (rc);
11459 }
11460
11461 BLOGD(sc, DBG_LOAD, "init complete\n");
11462
11463 /* now move the Queue to the SETUP state */
11464 memset(setup_params, 0, sizeof(*setup_params));
11465
11466 /* set Queue flags */
11467 setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11468
11469 /* set general SETUP parameters */
11470 bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11471 FIRST_TX_COS_INDEX);
11472
11473 bxe_pf_rx_q_prep(sc, fp,
11474 &setup_params->pause_params,
11475 &setup_params->rxq_params);
11476
11477 bxe_pf_tx_q_prep(sc, fp,
11478 &setup_params->txq_params,
11479 FIRST_TX_COS_INDEX);
11480
11481 /* Set the command */
11482 q_params.cmd = ECORE_Q_CMD_SETUP;
11483
11484 /* change the state to SETUP */
11485 rc = ecore_queue_state_change(sc, &q_params);
11486 if (rc) {
11487 BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
11488 return (rc);
11489 }
11490
11491 return (rc);
11492 }
11493
11494 static int
11495 bxe_setup_leading(struct bxe_softc *sc)
11496 {
11497 return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11498 }
11499
11500 static int
11501 bxe_config_rss_pf(struct bxe_softc *sc,
11502 struct ecore_rss_config_obj *rss_obj,
11503 uint8_t config_hash)
11504 {
11505 struct ecore_config_rss_params params = { NULL };
11506 int i;
11507
11508 /*
11509      * Although RSS is meaningless when there is a single HW queue, we
11510 * still need it enabled in order to have HW Rx hash generated.
11511 */
11512
11513 params.rss_obj = rss_obj;
11514
11515     bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11516
11517     bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11518
11519 /* RSS configuration */
11520     bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11521     bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11522     bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
11523     bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
11524 if (rss_obj->udp_rss_v4) {
11525         bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
11526 }
11527 if (rss_obj->udp_rss_v6) {
11528         bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
11529 }
11530
11531 /* Hash bits */
11532 params.rss_result_mask = MULTI_MASK;
11533
11534 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
11535
11536 if (config_hash) {
11537 /* RSS keys */
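        /* fill the key one 32-bit word at a time with random data */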
11538 for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
11539 params.rss_key[i] = arc4random();
11540 }
11541
11542         bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
11543 }
11544
11545     return (ecore_config_rss(sc, &params));
11546 }
11547
11548 static int
11549 bxe_config_rss_eth(struct bxe_softc *sc,
11550 uint8_t config_hash)
11551 {
11552 return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
11553 }
11554
11555 static int
11556 bxe_init_rss_pf(struct bxe_softc *sc)
11557 {
11558 uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
11559 int i;
11560
11561 /*
11562 * Prepare the initial contents of the indirection table if
11563 * RSS is enabled
11564 */
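    /* entries are spread round-robin over the ETH queues (consecutive client ids starting at fp[0]'s) */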
11565 for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
11566 sc->rss_conf_obj.ind_table[i] =
11567 (sc->fp->cl_id + (i % num_eth_queues));
11568 }
11569
11570 if (sc->udp_rss) {
11571 sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
11572 }
11573
11574 /*
11575 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
11576 * per-port, so if explicit configuration is needed, do it only
11577 * for a PMF.
11578 *
11579 * For 57712 and newer it's a per-function configuration.
11580 */
11581 return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
11582 }
11583
11584 static int
11585 bxe_set_mac_one(struct bxe_softc *sc,
11586 uint8_t *mac,
11587 struct ecore_vlan_mac_obj *obj,
11588 uint8_t set,
11589 int mac_type,
11590 unsigned long *ramrod_flags)
11591 {
11592 struct ecore_vlan_mac_ramrod_params ramrod_param;
11593 int rc;
11594
11595 memset(&ramrod_param, 0, sizeof(ramrod_param));
11596
11597 /* fill in general parameters */
11598 ramrod_param.vlan_mac_obj = obj;
11599 ramrod_param.ramrod_flags = *ramrod_flags;
11600
11601 /* fill a user request section if needed */
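    /*
     * With RAMROD_CONT set, the call only kicks off execution of already
     * queued commands (see bxe_set_uc_list()), so no new ADD/DEL entry
     * is filled in.
     */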
11602 if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
11603 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
11604
11605 bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
11606
11607 /* Set the command: ADD or DEL */
11608 ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
11609 ECORE_VLAN_MAC_DEL;
11610 }
11611
11612 rc = ecore_config_vlan_mac(sc, &ramrod_param);
11613
11614 if (rc == ECORE_EXISTS) {
11615 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
11616 /* do not treat adding same MAC as error */
11617 rc = 0;
11618 } else if (rc < 0) {
11619 BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
11620 }
11621
11622 return (rc);
11623 }
11624
11625 static int
11626 bxe_set_eth_mac(struct bxe_softc *sc,
11627 uint8_t set)
11628 {
11629 unsigned long ramrod_flags = 0;
11630
11631 BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
11632
11633 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
11634
11635 /* Eth MAC is set on RSS leading client (fp[0]) */
11636 return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
11637 &sc->sp_objs->mac_obj,
11638 set, ECORE_ETH_MAC, &ramrod_flags));
11639 }
11640
11641 static int
11642 bxe_get_cur_phy_idx(struct bxe_softc *sc)
11643 {
11644 uint32_t sel_phy_idx = 0;
11645
11646 if (sc->link_params.num_phys <= 1) {
11647 return (ELINK_INT_PHY);
11648 }
11649
11650 if (sc->link_vars.link_up) {
11651 sel_phy_idx = ELINK_EXT_PHY1;
11652 /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
11653 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
11654 (sc->link_params.phy[ELINK_EXT_PHY2].supported &
11655 ELINK_SUPPORTED_FIBRE))
11656 sel_phy_idx = ELINK_EXT_PHY2;
11657 } else {
11658 switch (elink_phy_selection(&sc->link_params)) {
11659 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
11660 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
11661 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
11662 sel_phy_idx = ELINK_EXT_PHY1;
11663 break;
11664 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
11665 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
11666 sel_phy_idx = ELINK_EXT_PHY2;
11667 break;
11668 }
11669 }
11670
11671 return (sel_phy_idx);
11672 }
11673
11674 static int
11675 bxe_get_link_cfg_idx(struct bxe_softc *sc)
11676 {
11677 uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
11678
11679 /*
11680 * The selected activated PHY is always after swapping (in case PHY
11681 * swapping is enabled). So when swapping is enabled, we need to reverse
11682 * the configuration
11683 */
11684
11685 if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11686 if (sel_phy_idx == ELINK_EXT_PHY1)
11687 sel_phy_idx = ELINK_EXT_PHY2;
11688 else if (sel_phy_idx == ELINK_EXT_PHY2)
11689 sel_phy_idx = ELINK_EXT_PHY1;
11690 }
11691
11692 return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
11693 }
11694
11695 static void
11696 bxe_set_requested_fc(struct bxe_softc *sc)
11697 {
11698 /*
11699 * Initialize link parameters structure variables
11700 * It is recommended to turn off RX FC for jumbo frames
11701 * for better performance
11702 */
11703 if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
11704 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
11705 } else {
11706 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
11707 }
11708 }
11709
11710 static void
11711 bxe_calc_fc_adv(struct bxe_softc *sc)
11712 {
11713 uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
11714
11715
11716 sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
11717 ADVERTISED_Pause);
11718
11719 switch (sc->link_vars.ieee_fc &
11720 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
11721
11722 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
11723 sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
11724 ADVERTISED_Pause);
11725 break;
11726
11727 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
11728 sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
11729 break;
11730
11731 default:
11732 break;
11733
11734 }
11735 }
11736
11737 static uint16_t
11738 bxe_get_mf_speed(struct bxe_softc *sc)
11739 {
11740 uint16_t line_speed = sc->link_vars.line_speed;
11741 if (IS_MF(sc)) {
11742 uint16_t maxCfg =
11743 bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
11744
11745 /* calculate the current MAX line speed limit for the MF devices */
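        /*
         * SI mode: maxCfg is a percentage of the actual line speed.
         * SD mode: maxCfg is in units of 100 Mbps and acts as an absolute cap.
         */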
11746 if (IS_MF_SI(sc)) {
11747 line_speed = (line_speed * maxCfg) / 100;
11748 } else { /* SD mode */
11749 uint16_t vn_max_rate = maxCfg * 100;
11750
11751 if (vn_max_rate < line_speed) {
11752 line_speed = vn_max_rate;
11753 }
11754 }
11755 }
11756
11757 return (line_speed);
11758 }
11759
11760 static void
11761 bxe_fill_report_data(struct bxe_softc *sc,
11762 struct bxe_link_report_data *data)
11763 {
11764 uint16_t line_speed = bxe_get_mf_speed(sc);
11765
11766 memset(data, 0, sizeof(*data));
11767
11768 /* fill the report data with the effective line speed */
11769 data->line_speed = line_speed;
11770
11771 /* Link is down */
11772 if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
11773 bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
11774 }
11775
11776 /* Full DUPLEX */
11777 if (sc->link_vars.duplex == DUPLEX_FULL) {
11778 bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
11779 }
11780
11781 /* Rx Flow Control is ON */
11782 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
11783 bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
11784 }
11785
11786 /* Tx Flow Control is ON */
11787 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
11788 bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
11789 }
11790 }
11791
11792 /* report link status to OS, should be called under phy_lock */
11793 static void
11794 bxe_link_report_locked(struct bxe_softc *sc)
11795 {
11796 struct bxe_link_report_data cur_data;
11797
11798 /* reread mf_cfg */
11799 if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
11800 bxe_read_mf_cfg(sc);
11801 }
11802
11803 /* Read the current link report info */
11804 bxe_fill_report_data(sc, &cur_data);
11805
11806 /* Don't report link down or exactly the same link status twice */
11807 if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
11808 (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11809 &sc->last_reported_link.link_report_flags) &&
11810 bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11811 &cur_data.link_report_flags))) {
11812 return;
11813 }
11814
11815 ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x\n",
11816 cur_data.link_report_flags, sc->last_reported_link.link_report_flags);
11817 sc->link_cnt++;
11818
11819 ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt);
11820 /* report new link params and remember the state for the next time */
11821 memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
11822
11823 if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11824 &cur_data.link_report_flags)) {
11825 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
11826 } else {
11827 const char *duplex;
11828 const char *flow;
11829
11830 if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
11831 &cur_data.link_report_flags)) {
11832 duplex = "full";
11833 ELINK_DEBUG_P0(sc, "link set to full duplex\n");
11834 } else {
11835 duplex = "half";
11836 ELINK_DEBUG_P0(sc, "link set to half duplex\n");
11837 }
11838
11839 /*
11840 * Handle the FC at the end so that only these flags would be
11841 * possibly set. This way we may easily check if there is no FC
11842 * enabled.
11843 */
11844 if (cur_data.link_report_flags) {
11845 if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11846 &cur_data.link_report_flags) &&
11847 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11848 &cur_data.link_report_flags)) {
11849 flow = "ON - receive & transmit";
11850 } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11851 &cur_data.link_report_flags) &&
11852 !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11853 &cur_data.link_report_flags)) {
11854 flow = "ON - receive";
11855 } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11856 &cur_data.link_report_flags) &&
11857 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11858 &cur_data.link_report_flags)) {
11859 flow = "ON - transmit";
11860 } else {
11861 flow = "none"; /* possible? */
11862 }
11863 } else {
11864 flow = "none";
11865 }
11866
11867 if_link_state_change(sc->ifp, LINK_STATE_UP);
11868 BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
11869 cur_data.line_speed, duplex, flow);
11870 }
11871 }
11872
11873 static void
11874 bxe_link_report(struct bxe_softc *sc)
11875 {
11876 bxe_acquire_phy_lock(sc);
11877 bxe_link_report_locked(sc);
11878 bxe_release_phy_lock(sc);
11879 }
11880
11881 static void
11882 bxe_link_status_update(struct bxe_softc *sc)
11883 {
11884 if (sc->state != BXE_STATE_OPEN) {
11885 return;
11886 }
11887
11888 if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
11889 elink_link_status_update(&sc->link_params, &sc->link_vars);
11890 } else {
11891 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
11892 ELINK_SUPPORTED_10baseT_Full |
11893 ELINK_SUPPORTED_100baseT_Half |
11894 ELINK_SUPPORTED_100baseT_Full |
11895 ELINK_SUPPORTED_1000baseT_Full |
11896 ELINK_SUPPORTED_2500baseX_Full |
11897 ELINK_SUPPORTED_10000baseT_Full |
11898 ELINK_SUPPORTED_TP |
11899 ELINK_SUPPORTED_FIBRE |
11900 ELINK_SUPPORTED_Autoneg |
11901 ELINK_SUPPORTED_Pause |
11902 ELINK_SUPPORTED_Asym_Pause);
11903 sc->port.advertising[0] = sc->port.supported[0];
11904
11905 sc->link_params.sc = sc;
11906 sc->link_params.port = SC_PORT(sc);
11907 sc->link_params.req_duplex[0] = DUPLEX_FULL;
11908 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE;
11909 sc->link_params.req_line_speed[0] = SPEED_10000;
11910 sc->link_params.speed_cap_mask[0] = 0x7f0000;
11911 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G;
11912
11913 if (CHIP_REV_IS_FPGA(sc)) {
11914 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC;
11915 sc->link_vars.line_speed = ELINK_SPEED_1000;
11916 sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11917 LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
11918 } else {
11919 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC;
11920 sc->link_vars.line_speed = ELINK_SPEED_10000;
11921 sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11922 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
11923 }
11924
11925 sc->link_vars.link_up = 1;
11926
11927 sc->link_vars.duplex = DUPLEX_FULL;
11928 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
11929
11930 if (IS_PF(sc)) {
11931 REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
11932 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11933 bxe_link_report(sc);
11934 }
11935 }
11936
11937 if (IS_PF(sc)) {
11938 if (sc->link_vars.link_up) {
11939 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11940 } else {
11941 bxe_stats_handle(sc, STATS_EVENT_STOP);
11942 }
11943 bxe_link_report(sc);
11944 } else {
11945 bxe_link_report(sc);
11946 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11947 }
11948 }
11949
11950 static int
11951 bxe_initial_phy_init(struct bxe_softc *sc,
11952 int load_mode)
11953 {
11954 int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
11955 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
11956 struct elink_params *lp = &sc->link_params;
11957
11958 bxe_set_requested_fc(sc);
11959
11960 if (CHIP_REV_IS_SLOW(sc)) {
11961 uint32_t bond = CHIP_BOND_ID(sc);
11962 uint32_t feat = 0;
11963
11964 if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
11965 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11966 } else if (bond & 0x4) {
11967 if (CHIP_IS_E3(sc)) {
11968 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
11969 } else {
11970 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11971 }
11972 } else if (bond & 0x8) {
11973 if (CHIP_IS_E3(sc)) {
11974 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
11975 } else {
11976 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11977 }
11978 }
11979
11980 /* disable EMAC for E3 and above */
11981 if (bond & 0x2) {
11982 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11983 }
11984
11985 sc->link_params.feature_config_flags |= feat;
11986 }
11987
11988 bxe_acquire_phy_lock(sc);
11989
11990 if (load_mode == LOAD_DIAG) {
11991 lp->loopback_mode = ELINK_LOOPBACK_XGXS;
11992 /* Prefer doing PHY loopback at 10G speed, if possible */
11993 if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
11994 if (lp->speed_cap_mask[cfg_idx] &
11995 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
11996 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
11997 } else {
11998 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
11999 }
12000 }
12001 }
12002
12003 if (load_mode == LOAD_LOOPBACK_EXT) {
12004 lp->loopback_mode = ELINK_LOOPBACK_EXT;
12005 }
12006
12007 rc = elink_phy_init(&sc->link_params, &sc->link_vars);
12008
12009 bxe_release_phy_lock(sc);
12010
12011 bxe_calc_fc_adv(sc);
12012
12013 if (sc->link_vars.link_up) {
12014 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
12015 bxe_link_report(sc);
12016 }
12017
12018 if (!CHIP_REV_IS_SLOW(sc)) {
12019 bxe_periodic_start(sc);
12020 }
12021
12022 sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
12023 return (rc);
12024 }
12025
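/*
 * if_foreach_llmaddr() callback: store a pointer to each link-level
 * multicast address in the caller-provided mc_mac array (indexed by cnt)
 * and return 1 (one address handled).
 */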
12026 static u_int
12027 bxe_push_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
12028 {
12029 struct ecore_mcast_list_elem *mc_mac = arg;
12030
12031 mc_mac += cnt;
12032 mc_mac->mac = (uint8_t *)LLADDR(sdl);
12033
12034 return (1);
12035 }
12036
12037 static int
12038 bxe_init_mcast_macs_list(struct bxe_softc *sc,
12039 struct ecore_mcast_ramrod_params *p)
12040 {
12041 if_t ifp = sc->ifp;
12042 int mc_count;
12043 struct ecore_mcast_list_elem *mc_mac;
12044
12045 ECORE_LIST_INIT(&p->mcast_list);
12046 p->mcast_list_len = 0;
12047
12048 /* XXXGL: multicast count may change later */
12049 mc_count = if_llmaddr_count(ifp);
12050
12051 if (!mc_count) {
12052 return (0);
12053 }
12054
12055 mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF,
12056 (M_NOWAIT | M_ZERO));
12057 if (!mc_mac) {
12058 BLOGE(sc, "Failed to allocate temp mcast list\n");
12059 return (-1);
12060 }
12061 bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
12062 if_foreach_llmaddr(ifp, bxe_push_maddr, mc_mac);
12063
12064 for (int i = 0; i < mc_count; i ++) {
12065 ECORE_LIST_PUSH_TAIL(&mc_mac[i].link, &p->mcast_list);
12066 BLOGD(sc, DBG_LOAD,
12067 "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X and mc_count %d\n",
12068 mc_mac[i].mac[0], mc_mac[i].mac[1], mc_mac[i].mac[2],
12069 mc_mac[i].mac[3], mc_mac[i].mac[4], mc_mac[i].mac[5],
12070 mc_count);
12071 }
12072
12073 p->mcast_list_len = mc_count;
12074
12075 return (0);
12076 }
12077
12078 static void
12079 bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p)
12080 {
12081 struct ecore_mcast_list_elem *mc_mac =
12082 ECORE_LIST_FIRST_ENTRY(&p->mcast_list,
12083 struct ecore_mcast_list_elem,
12084 link);
12085
12086 if (mc_mac) {
12087 /* only a single free as all mc_macs are in the same heap array */
12088 free(mc_mac, M_DEVBUF);
12089 }
12090 }
12091 static int
12092 bxe_set_mc_list(struct bxe_softc *sc)
12093 {
12094 struct ecore_mcast_ramrod_params rparam = { NULL };
12095 int rc = 0;
12096
12097 rparam.mcast_obj = &sc->mcast_obj;
12098
12099 BXE_MCAST_LOCK(sc);
12100
12101 /* first, clear all configured multicast MACs */
12102 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12103 if (rc < 0) {
12104 BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12105         /* Manual backport of parts of FreeBSD upstream r284470. */
12106 BXE_MCAST_UNLOCK(sc);
12107 return (rc);
12108 }
12109
12110 /* configure a new MACs list */
12111 rc = bxe_init_mcast_macs_list(sc, &rparam);
12112 if (rc) {
12113 BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc);
12114 BXE_MCAST_UNLOCK(sc);
12115 return (rc);
12116 }
12117
12118 /* Now add the new MACs */
12119 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12120 if (rc < 0) {
12121 BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12122 }
12123
12124 bxe_free_mcast_macs_list(&rparam);
12125
12126 BXE_MCAST_UNLOCK(sc);
12127
12128 return (rc);
12129 }
12130
12131 struct bxe_set_addr_ctx {
12132 struct bxe_softc *sc;
12133 unsigned long ramrod_flags;
12134 int rc;
12135 };
12136
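/*
 * if_foreach_lladdr() callback: schedule an ADD for each unicast link-level
 * address. The first hard failure is latched in ctx->rc, after which the
 * remaining addresses are skipped.
 */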
12137 static u_int
12138 bxe_set_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
12139 {
12140 struct bxe_set_addr_ctx *ctx = arg;
12141 struct ecore_vlan_mac_obj *mac_obj = &ctx->sc->sp_objs->mac_obj;
12142 int rc;
12143
12144 if (ctx->rc < 0)
12145 return (0);
12146
12147 rc = bxe_set_mac_one(ctx->sc, (uint8_t *)LLADDR(sdl), mac_obj, TRUE,
12148 ECORE_UC_LIST_MAC, &ctx->ramrod_flags);
12149
12150 /* do not treat adding same MAC as an error */
12151 if (rc == -EEXIST)
12152 BLOGD(ctx->sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12153 else if (rc < 0) {
12154 BLOGE(ctx->sc, "Failed to schedule ADD operations (%d)\n", rc);
12155 ctx->rc = rc;
12156 }
12157
12158 return (1);
12159 }
12160
12161 static int
12162 bxe_set_uc_list(struct bxe_softc *sc)
12163 {
12164 if_t ifp = sc->ifp;
12165 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12166 struct bxe_set_addr_ctx ctx = { sc, 0, 0 };
12167 int rc;
12168
12169     /* first schedule a cleanup of the old configuration */
12170 rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12171 if (rc < 0) {
12172 BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12173 return (rc);
12174 }
12175
12176 if_foreach_lladdr(ifp, bxe_set_addr, &ctx);
12177 if (ctx.rc < 0)
12178 return (ctx.rc);
12179
12180 /* Execute the pending commands */
12181 bit_set(&ctx.ramrod_flags, RAMROD_CONT);
12182 return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12183 ECORE_UC_LIST_MAC, &ctx.ramrod_flags));
12184 }
12185
12186 static void
12187 bxe_set_rx_mode(struct bxe_softc *sc)
12188 {
12189 if_t ifp = sc->ifp;
12190 uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12191
12192 if (sc->state != BXE_STATE_OPEN) {
12193 BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12194 return;
12195 }
12196
12197 BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));
12198
12199 if (if_getflags(ifp) & IFF_PROMISC) {
12200 rx_mode = BXE_RX_MODE_PROMISC;
12201 } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
12202 ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
12203 CHIP_IS_E1(sc))) {
12204 rx_mode = BXE_RX_MODE_ALLMULTI;
12205 } else {
12206 if (IS_PF(sc)) {
12207 /* some multicasts */
12208 if (bxe_set_mc_list(sc) < 0) {
12209 rx_mode = BXE_RX_MODE_ALLMULTI;
12210 }
12211 if (bxe_set_uc_list(sc) < 0) {
12212 rx_mode = BXE_RX_MODE_PROMISC;
12213 }
12214 }
12215 }
12216
12217 sc->rx_mode = rx_mode;
12218
12219 /* schedule the rx_mode command */
12220 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12221 BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12222 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12223 return;
12224 }
12225
12226 if (IS_PF(sc)) {
12227 bxe_set_storm_rx_mode(sc);
12228 }
12229 }
12230
12231
12232 /* update flags in shmem */
12233 static void
12234 bxe_update_drv_flags(struct bxe_softc *sc,
12235 uint32_t flags,
12236 uint32_t set)
12237 {
12238 uint32_t drv_flags;
12239
12240 if (SHMEM2_HAS(sc, drv_flags)) {
12241 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12242 drv_flags = SHMEM2_RD(sc, drv_flags);
12243
12244 if (set) {
12245 SET_FLAGS(drv_flags, flags);
12246 } else {
12247 RESET_FLAGS(drv_flags, flags);
12248 }
12249
12250 SHMEM2_WR(sc, drv_flags, drv_flags);
12251 BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12252
12253 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12254 }
12255 }
12256
12257 /* periodic timer callout routine, only runs when the interface is up */
12258
12259 static void
12260 bxe_periodic_callout_func(void *xsc)
12261 {
12262 struct bxe_softc *sc = (struct bxe_softc *)xsc;
12263 int i;
12264
12265 if (!BXE_CORE_TRYLOCK(sc)) {
12266 /* just bail and try again next time */
12267
12268 if ((sc->state == BXE_STATE_OPEN) &&
12269 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12270 /* schedule the next periodic callout */
12271 callout_reset(&sc->periodic_callout, hz,
12272 bxe_periodic_callout_func, sc);
12273 }
12274
12275 return;
12276 }
12277
12278 if ((sc->state != BXE_STATE_OPEN) ||
12279 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12280 BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12281 BXE_CORE_UNLOCK(sc);
12282 return;
12283 }
12284
12285
12286 /* Check for TX timeouts on any fastpath. */
12287 FOR_EACH_QUEUE(sc, i) {
12288 if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12289 /* Ruh-Roh, chip was reset! */
12290 break;
12291 }
12292 }
12293
12294 if (!CHIP_REV_IS_SLOW(sc)) {
12295 /*
12296 * This barrier is needed to ensure the ordering between the writing
12297 * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
12298 * the reading here.
12299 */
12300 mb();
12301 if (sc->port.pmf) {
12302 bxe_acquire_phy_lock(sc);
12303 elink_period_func(&sc->link_params, &sc->link_vars);
12304 bxe_release_phy_lock(sc);
12305 }
12306 }
12307
12308 if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12309 int mb_idx = SC_FW_MB_IDX(sc);
12310 uint32_t drv_pulse;
12311 uint32_t mcp_pulse;
12312
12313 ++sc->fw_drv_pulse_wr_seq;
12314 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12315
12316 drv_pulse = sc->fw_drv_pulse_wr_seq;
12317 bxe_drv_pulse(sc);
12318
12319 mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12320 MCP_PULSE_SEQ_MASK);
12321
12322 /*
12323 * The delta between driver pulse and mcp response should
12324 * be 1 (before mcp response) or 0 (after mcp response).
12325 */
12326 if ((drv_pulse != mcp_pulse) &&
12327 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12328 /* someone lost a heartbeat... */
12329 BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12330 drv_pulse, mcp_pulse);
12331 }
12332 }
12333
12334 /* state is BXE_STATE_OPEN */
12335 bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12336
12337 BXE_CORE_UNLOCK(sc);
12338
12339 if ((sc->state == BXE_STATE_OPEN) &&
12340 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12341 /* schedule the next periodic callout */
12342 callout_reset(&sc->periodic_callout, hz,
12343 bxe_periodic_callout_func, sc);
12344 }
12345 }
12346
12347 static void
12348 bxe_periodic_start(struct bxe_softc *sc)
12349 {
12350 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12351 callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12352 }
12353
12354 static void
12355 bxe_periodic_stop(struct bxe_softc *sc)
12356 {
12357 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12358 callout_drain(&sc->periodic_callout);
12359 }
12360
12361 void
12362 bxe_parity_recover(struct bxe_softc *sc)
12363 {
12364 uint8_t global = FALSE;
12365 uint32_t error_recovered, error_unrecovered;
12366 bool is_parity;
12367
12368
12369 if ((sc->recovery_state == BXE_RECOVERY_FAILED) &&
12370 (sc->state == BXE_STATE_ERROR)) {
12371 BLOGE(sc, "RECOVERY failed, "
12372 "stack notified driver is NOT running! "
12373 "Please reboot/power cycle the system.\n");
12374 return;
12375 }
12376
12377 while (1) {
12378 BLOGD(sc, DBG_SP,
12379 "%s sc=%p state=0x%x rec_state=0x%x error_status=%x\n",
12380 __func__, sc, sc->state, sc->recovery_state, sc->error_status);
12381
12382 switch(sc->recovery_state) {
12383
12384 case BXE_RECOVERY_INIT:
12385 is_parity = bxe_chk_parity_attn(sc, &global, FALSE);
12386
12387 if ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ||
12388 (sc->error_status & BXE_ERR_MCP_ASSERT) ||
12389 (sc->error_status & BXE_ERR_GLOBAL)) {
12390
12391 BXE_CORE_LOCK(sc);
12392 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12393 bxe_periodic_stop(sc);
12394 }
12395 bxe_nic_unload(sc, UNLOAD_RECOVERY, false);
12396 sc->state = BXE_STATE_ERROR;
12397 sc->recovery_state = BXE_RECOVERY_FAILED;
12398 BLOGE(sc, " No Recovery tried for error 0x%x"
12399 " stack notified driver is NOT running!"
12400 " Please reboot/power cycle the system.\n",
12401 sc->error_status);
12402 BXE_CORE_UNLOCK(sc);
12403 return;
12404 }
12405
12406
12407 /* Try to get a LEADER_LOCK HW lock */
12408 if (bxe_trylock_leader_lock(sc)) {
12409
12410 bxe_set_reset_in_progress(sc);
12411 /*
12412 * Check if there is a global attention and if
12413 * there was a global attention, set the global
12414 * reset bit.
12415 */
12416 if (global) {
12417 bxe_set_reset_global(sc);
12418 }
12419 sc->is_leader = 1;
12420 }
12421
12422 /* If interface has been removed - break */
12423
12424 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12425 bxe_periodic_stop(sc);
12426 }
12427
12428 BXE_CORE_LOCK(sc);
12429 bxe_nic_unload(sc,UNLOAD_RECOVERY, false);
12430 sc->recovery_state = BXE_RECOVERY_WAIT;
12431 BXE_CORE_UNLOCK(sc);
12432
12433 /*
12434 * Ensure "is_leader", MCP command sequence and
12435 * "recovery_state" update values are seen on other
12436 * CPUs.
12437 */
12438 mb();
12439 break;
12440 case BXE_RECOVERY_WAIT:
12441
12442 if (sc->is_leader) {
12443 int other_engine = SC_PATH(sc) ? 0 : 1;
12444 bool other_load_status =
12445 bxe_get_load_status(sc, other_engine);
12446 bool load_status =
12447 bxe_get_load_status(sc, SC_PATH(sc));
12448 global = bxe_reset_is_global(sc);
12449
12450 /*
12451 * In case of a parity in a global block, let
12452 * the first leader that performs a
12453 * leader_reset() reset the global blocks in
12454 * order to clear global attentions. Otherwise
12455 * the gates will remain closed for that
12456 * engine.
12457 */
12458 if (load_status ||
12459 (global && other_load_status)) {
12460 /*
12461 * Wait until all other functions get
12462 * down.
12463 */
12464 taskqueue_enqueue_timeout(taskqueue_thread,
12465 &sc->sp_err_timeout_task, hz/10);
12466 return;
12467 } else {
12468 /*
12469 * If all other functions got down
12470 * try to bring the chip back to
12471 * normal. In any case it's an exit
12472 * point for a leader.
12473 */
12474 if (bxe_leader_reset(sc)) {
12475 BLOGE(sc, "RECOVERY failed, "
12476 "stack notified driver is NOT running!\n");
12477 sc->recovery_state = BXE_RECOVERY_FAILED;
12478 sc->state = BXE_STATE_ERROR;
12479 mb();
12480 return;
12481 }
12482
12483 /*
12484 * If we are here, means that the
12485 * leader has succeeded and doesn't
12486 * want to be a leader any more. Try
12487 * to continue as a none-leader.
12488                      * to continue as a non-leader.
12489 break;
12490 }
12491
12492 } else { /* non-leader */
12493 if (!bxe_reset_is_done(sc, SC_PATH(sc))) {
12494 /*
12495 * Try to get a LEADER_LOCK HW lock as
12496 * long as a former leader may have
12497 * been unloaded by the user or
12498 * released a leadership by another
12499 * reason.
12500 */
12501 if (bxe_trylock_leader_lock(sc)) {
12502 /*
12503 * I'm a leader now! Restart a
12504 * switch case.
12505 */
12506 sc->is_leader = 1;
12507 break;
12508 }
12509
12510 taskqueue_enqueue_timeout(taskqueue_thread,
12511 &sc->sp_err_timeout_task, hz/10);
12512 return;
12513
12514 } else {
12515 /*
12516 * If there was a global attention, wait
12517 * for it to be cleared.
12518 */
12519 if (bxe_reset_is_global(sc)) {
12520 taskqueue_enqueue_timeout(taskqueue_thread,
12521 &sc->sp_err_timeout_task, hz/10);
12522 return;
12523 }
12524
12525 error_recovered =
12526 sc->eth_stats.recoverable_error;
12527 error_unrecovered =
12528 sc->eth_stats.unrecoverable_error;
12529 BXE_CORE_LOCK(sc);
12530 sc->recovery_state =
12531 BXE_RECOVERY_NIC_LOADING;
12532 if (bxe_nic_load(sc, LOAD_NORMAL)) {
12533 error_unrecovered++;
12534 sc->recovery_state = BXE_RECOVERY_FAILED;
12535 sc->state = BXE_STATE_ERROR;
12536                         BLOGE(sc, "Recovery is NOT successful, "
12537 " state=0x%x recovery_state=0x%x error=%x\n",
12538 sc->state, sc->recovery_state, sc->error_status);
12539 sc->error_status = 0;
12540 } else {
12541 sc->recovery_state =
12542 BXE_RECOVERY_DONE;
12543 error_recovered++;
12544                         BLOGI(sc, "Recovery is successful from errors %x,"
12545 " state=0x%x"
12546 " recovery_state=0x%x \n", sc->error_status,
12547 sc->state, sc->recovery_state);
12548 mb();
12549 }
12550 sc->error_status = 0;
12551 BXE_CORE_UNLOCK(sc);
12552 sc->eth_stats.recoverable_error =
12553 error_recovered;
12554 sc->eth_stats.unrecoverable_error =
12555 error_unrecovered;
12556
12557 return;
12558 }
12559 }
12560 default:
12561 return;
12562 }
12563 }
12564 }
12565 void
12566 bxe_handle_error(struct bxe_softc * sc)
12567 {
12568
12569 if(sc->recovery_state == BXE_RECOVERY_WAIT) {
12570 return;
12571 }
12572 if(sc->error_status) {
12573 if (sc->state == BXE_STATE_OPEN) {
12574 bxe_int_disable(sc);
12575 }
12576 if (sc->link_vars.link_up) {
12577 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
12578 }
12579 sc->recovery_state = BXE_RECOVERY_INIT;
12580     BLOGI(sc, "bxe%d: Recovery started, errors 0x%x, recovery state 0x%x\n",
12581 sc->unit, sc->error_status, sc->recovery_state);
12582 bxe_parity_recover(sc);
12583 }
12584 }
12585
12586 static void
12587 bxe_sp_err_timeout_task(void *arg, int pending)
12588 {
12589
12590 struct bxe_softc *sc = (struct bxe_softc *)arg;
12591
12592 BLOGD(sc, DBG_SP,
12593 "%s state = 0x%x rec state=0x%x error_status=%x\n",
12594 __func__, sc->state, sc->recovery_state, sc->error_status);
12595
12596 if((sc->recovery_state == BXE_RECOVERY_FAILED) &&
12597 (sc->state == BXE_STATE_ERROR)) {
12598 return;
12599 }
12600     /* take a GRC dump now if one was requested and an error is pending */
12601 if ((sc->error_status) && (sc->trigger_grcdump)) {
12602 bxe_grc_dump(sc);
12603 }
12604 if (sc->recovery_state != BXE_RECOVERY_DONE) {
12605 bxe_handle_error(sc);
12606 bxe_parity_recover(sc);
12607 } else if (sc->error_status) {
12608 bxe_handle_error(sc);
12609 }
12610
12611 return;
12612 }
12613
12614 /* start the controller */
12615 static __noinline int
12616 bxe_nic_load(struct bxe_softc *sc,
12617 int load_mode)
12618 {
12619 uint32_t val;
12620 int load_code = 0;
12621 int i, rc = 0;
12622
12623 BXE_CORE_LOCK_ASSERT(sc);
12624
12625 BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12626
12627 sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12628
12629 if (IS_PF(sc)) {
12630 /* must be called before memory allocation and HW init */
12631 bxe_ilt_set_info(sc);
12632 }
12633
12634 sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12635
12636 bxe_set_fp_rx_buf_size(sc);
12637
12638 if (bxe_alloc_fp_buffers(sc) != 0) {
12639 BLOGE(sc, "Failed to allocate fastpath memory\n");
12640 sc->state = BXE_STATE_CLOSED;
12641 rc = ENOMEM;
12642 goto bxe_nic_load_error0;
12643 }
12644
12645 if (bxe_alloc_mem(sc) != 0) {
12646 sc->state = BXE_STATE_CLOSED;
12647 rc = ENOMEM;
12648 goto bxe_nic_load_error0;
12649 }
12650
12651 if (bxe_alloc_fw_stats_mem(sc) != 0) {
12652 sc->state = BXE_STATE_CLOSED;
12653 rc = ENOMEM;
12654 goto bxe_nic_load_error0;
12655 }
12656
12657 if (IS_PF(sc)) {
12658 /* set pf load just before approaching the MCP */
12659 bxe_set_pf_load(sc);
12660
12661 /* if MCP exists send load request and analyze response */
12662 if (!BXE_NOMCP(sc)) {
12663 /* attempt to load pf */
12664 if (bxe_nic_load_request(sc, &load_code) != 0) {
12665 sc->state = BXE_STATE_CLOSED;
12666 rc = ENXIO;
12667 goto bxe_nic_load_error1;
12668 }
12669
12670 /* what did the MCP say? */
12671 if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12672 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12673 sc->state = BXE_STATE_CLOSED;
12674 rc = ENXIO;
12675 goto bxe_nic_load_error2;
12676 }
12677 } else {
12678 BLOGI(sc, "Device has no MCP!\n");
12679 load_code = bxe_nic_load_no_mcp(sc);
12680 }
12681
12682 /* mark PMF if applicable */
12683 bxe_nic_load_pmf(sc, load_code);
12684
12685 /* Init Function state controlling object */
12686 bxe_init_func_obj(sc);
12687
12688 /* Initialize HW */
12689 if (bxe_init_hw(sc, load_code) != 0) {
12690 BLOGE(sc, "HW init failed\n");
12691 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12692 sc->state = BXE_STATE_CLOSED;
12693 rc = ENXIO;
12694 goto bxe_nic_load_error2;
12695 }
12696 }
12697
12698 /* set ALWAYS_ALIVE bit in shmem */
12699 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
12700 bxe_drv_pulse(sc);
12701 sc->flags |= BXE_NO_PULSE;
12702
12703 /* attach interrupts */
12704 if (bxe_interrupt_attach(sc) != 0) {
12705 sc->state = BXE_STATE_CLOSED;
12706 rc = ENXIO;
12707 goto bxe_nic_load_error2;
12708 }
12709
12710 bxe_nic_init(sc, load_code);
12711
12712 /* Init per-function objects */
12713 if (IS_PF(sc)) {
12714 bxe_init_objs(sc);
12715 // XXX bxe_iov_nic_init(sc);
12716
12717 /* set AFEX default VLAN tag to an invalid value */
12718 sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12719 // XXX bxe_nic_load_afex_dcc(sc, load_code);
12720
12721 sc->state = BXE_STATE_OPENING_WAITING_PORT;
12722 rc = bxe_func_start(sc);
12723 if (rc) {
12724 BLOGE(sc, "Function start failed! rc = %d\n", rc);
12725 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12726 sc->state = BXE_STATE_ERROR;
12727 goto bxe_nic_load_error3;
12728 }
12729
12730 /* send LOAD_DONE command to MCP */
12731 if (!BXE_NOMCP(sc)) {
12732 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12733 if (!load_code) {
12734 BLOGE(sc, "MCP response failure, aborting\n");
12735 sc->state = BXE_STATE_ERROR;
12736 rc = ENXIO;
12737 goto bxe_nic_load_error3;
12738 }
12739 }
12740
12741 rc = bxe_setup_leading(sc);
12742 if (rc) {
12743 BLOGE(sc, "Setup leading failed! rc = %d\n", rc);
12744 sc->state = BXE_STATE_ERROR;
12745 goto bxe_nic_load_error3;
12746 }
12747
12748 FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
12749 rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
12750 if (rc) {
12751 BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
12752 sc->state = BXE_STATE_ERROR;
12753 goto bxe_nic_load_error3;
12754 }
12755 }
12756
12757 rc = bxe_init_rss_pf(sc);
12758 if (rc) {
12759 BLOGE(sc, "PF RSS init failed\n");
12760 sc->state = BXE_STATE_ERROR;
12761 goto bxe_nic_load_error3;
12762 }
12763 }
12764 /* XXX VF */
12765
12766 /* now when Clients are configured we are ready to work */
12767 sc->state = BXE_STATE_OPEN;
12768
12769 /* Configure a ucast MAC */
12770 if (IS_PF(sc)) {
12771 rc = bxe_set_eth_mac(sc, TRUE);
12772 }
12773 if (rc) {
12774 BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
12775 sc->state = BXE_STATE_ERROR;
12776 goto bxe_nic_load_error3;
12777 }
12778
12779 if (sc->port.pmf) {
12780 rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
12781 if (rc) {
12782 sc->state = BXE_STATE_ERROR;
12783 goto bxe_nic_load_error3;
12784 }
12785 }
12786
12787 sc->link_params.feature_config_flags &=
12788 ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
12789
12790 /* start fast path */
12791
12792 /* Initialize Rx filter */
12793 bxe_set_rx_mode(sc);
12794
12795 /* start the Tx */
12796 switch (/* XXX load_mode */LOAD_OPEN) {
12797 case LOAD_NORMAL:
12798 case LOAD_OPEN:
12799 break;
12800
12801 case LOAD_DIAG:
12802 case LOAD_LOOPBACK_EXT:
12803 sc->state = BXE_STATE_DIAG;
12804 break;
12805
12806 default:
12807 break;
12808 }
12809
12810 if (sc->port.pmf) {
12811 bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
12812 } else {
12813 bxe_link_status_update(sc);
12814 }
12815
12816 /* start the periodic timer callout */
12817 bxe_periodic_start(sc);
12818
12819 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
12820 /* mark driver is loaded in shmem2 */
12821 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
12822 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
12823 (val |
12824 DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
12825 DRV_FLAGS_CAPABILITIES_LOADED_L2));
12826 }
12827
12828 /* wait for all pending SP commands to complete */
12829 if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
12830 BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
12831 bxe_periodic_stop(sc);
12832 bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
12833 return (ENXIO);
12834 }
12835
12836 /* Tell the stack the driver is running! */
12837 if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);
12838
12839 BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
12840
12841 return (0);
12842
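/*
 * Error unwind: error3 quiesces interrupts and queued SP objects, error2
 * notifies the MCP that the aborted function is unloading, error1 clears
 * the pf_load bit set above, and error0 releases the memory allocated
 * earlier in this routine.
 */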
12843 bxe_nic_load_error3:
12844
12845 if (IS_PF(sc)) {
12846 bxe_int_disable_sync(sc, 1);
12847
12848 /* clean out queued objects */
12849 bxe_squeeze_objects(sc);
12850 }
12851
12852 bxe_interrupt_detach(sc);
12853
12854 bxe_nic_load_error2:
12855
12856 if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12857 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
12858 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
12859 }
12860
12861 sc->port.pmf = 0;
12862
12863 bxe_nic_load_error1:
12864
12865 /* clear pf_load status, as it was already set */
12866 if (IS_PF(sc)) {
12867 bxe_clear_pf_load(sc);
12868 }
12869
12870 bxe_nic_load_error0:
12871
12872 bxe_free_fw_stats_mem(sc);
12873 bxe_free_fp_buffers(sc);
12874 bxe_free_mem(sc);
12875
12876 return (rc);
12877 }
12878
12879 static int
12880 bxe_init_locked(struct bxe_softc *sc)
12881 {
12882 int other_engine = SC_PATH(sc) ? 0 : 1;
12883 uint8_t other_load_status, load_status;
12884 uint8_t global = FALSE;
12885 int rc;
12886
12887 BXE_CORE_LOCK_ASSERT(sc);
12888
12889 /* check if the driver is already running */
12890 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12891 BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
12892 return (0);
12893 }
12894
12895 if ((sc->state == BXE_STATE_ERROR) &&
12896 (sc->recovery_state == BXE_RECOVERY_FAILED)) {
12897 BLOGE(sc, "Initialization not done, "
12898 "as previous recovery failed. "
12899 "Reboot/Power-cycle the system\n");
12900 return (ENXIO);
12901 }
12902
12903
12904 bxe_set_power_state(sc, PCI_PM_D0);
12905
12906 /*
12907 * If parity occurred during the unload, then attentions and/or
12908 * RECOVERY_IN_PROGRESS may still be set. If so we want the first function
12909 * loaded on the current engine to complete the recovery. Parity recovery
12910 * is only relevant for PF driver.
12911 */
12912 if (IS_PF(sc)) {
12913 other_load_status = bxe_get_load_status(sc, other_engine);
12914 load_status = bxe_get_load_status(sc, SC_PATH(sc));
12915
12916 if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
12917 bxe_chk_parity_attn(sc, &global, TRUE)) {
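/* the do/while(0) below only exists so that 'break' can exit the recovery attempt early */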
12918 do {
12919 /*
12920 * If there are attentions and they are in global blocks, set
12921 * the GLOBAL_RESET bit regardless whether it will be this
12922 * function that will complete the recovery or not.
12923 */
12924 if (global) {
12925 bxe_set_reset_global(sc);
12926 }
12927
12928 /*
12929 * Only the first function on the current engine should try
12930 * to recover in open. In case of attentions in global blocks
12931 * only the first in the chip should try to recover.
12932 */
12933 if ((!load_status && (!global || !other_load_status)) &&
12934 bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
12935 BLOGI(sc, "Recovered during init\n");
12936 break;
12937 }
12938
12939 /* recovery has failed... */
12940 bxe_set_power_state(sc, PCI_PM_D3hot);
12941 sc->recovery_state = BXE_RECOVERY_FAILED;
12942
12943 BLOGE(sc, "Recovery flow hasn't properly "
12944 "completed yet, try again later. "
12945 "If you still see this message after a "
12946 "few retries then power cycle is required.\n");
12947
12948 rc = ENXIO;
12949 goto bxe_init_locked_done;
12950 } while (0);
12951 }
12952 }
12953
12954 sc->recovery_state = BXE_RECOVERY_DONE;
12955
12956 rc = bxe_nic_load(sc, LOAD_OPEN);
12957
12958 bxe_init_locked_done:
12959
12960 if (rc) {
12961 /* Tell the stack the driver is NOT running! */
12962 BLOGE(sc, "Initialization failed, "
12963 "stack notified driver is NOT running!\n");
12964 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
12965 }
12966
12967 return (rc);
12968 }
12969
12970 static int
12971 bxe_stop_locked(struct bxe_softc *sc)
12972 {
12973 BXE_CORE_LOCK_ASSERT(sc);
12974 return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
12975 }
12976
12977 /*
12978 * Handles controller initialization when called from an unlocked routine.
12979 * ifconfig calls this function.
12980 *
12981 * Returns:
12982 * void
12983 */
12984 static void
12985 bxe_init(void *xsc)
12986 {
12987 struct bxe_softc *sc = (struct bxe_softc *)xsc;
12988
12989 BXE_CORE_LOCK(sc);
12990 bxe_init_locked(sc);
12991 BXE_CORE_UNLOCK(sc);
12992 }
12993
12994 static int
12995 bxe_init_ifnet(struct bxe_softc *sc)
12996 {
12997 if_t ifp;
12998 int capabilities;
12999
13000 /* ifconfig entrypoint for media type/status reporting */
13001 ifmedia_init(&sc->ifmedia, IFM_IMASK,
13002 bxe_ifmedia_update,
13003 bxe_ifmedia_status);
13004
13005 /* set the default interface values */
13006 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
13007 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
13008 ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
13009
13010 sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
13011 BLOGI(sc, "IFMEDIA flags : %x\n", sc->ifmedia.ifm_media);
13012
13013 /* allocate the ifnet structure */
13014 if ((ifp = if_gethandle(IFT_ETHER)) == NULL) {
13015 BLOGE(sc, "Interface allocation failed!\n");
13016 return (ENXIO);
13017 }
13018
13019 if_setsoftc(ifp, sc);
13020 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
13021 if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
13022 if_setioctlfn(ifp, bxe_ioctl);
13023 if_setstartfn(ifp, bxe_tx_start);
13024 if_setgetcounterfn(ifp, bxe_get_counter);
13025 if_settransmitfn(ifp, bxe_tx_mq_start);
13026 if_setqflushfn(ifp, bxe_mq_flush);
13027 if_setinitfn(ifp, bxe_init);
13028 if_setmtu(ifp, sc->mtu);
13029 if_sethwassist(ifp, (CSUM_IP |
13030 CSUM_TCP |
13031 CSUM_UDP |
13032 CSUM_TSO |
13033 CSUM_TCP_IPV6 |
13034 CSUM_UDP_IPV6));
13035
13036 capabilities =
13037 (IFCAP_VLAN_MTU |
13038 IFCAP_VLAN_HWTAGGING |
13039 IFCAP_VLAN_HWTSO |
13040 IFCAP_VLAN_HWFILTER |
13041 IFCAP_VLAN_HWCSUM |
13042 IFCAP_HWCSUM |
13043 IFCAP_JUMBO_MTU |
13044 IFCAP_LRO |
13045 IFCAP_TSO4 |
13046 IFCAP_TSO6 |
13047 IFCAP_WOL_MAGIC);
13048 if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */
13049 if_setcapenable(ifp, if_getcapabilities(ifp));
13050 if_setbaudrate(ifp, IF_Gbps(10));
13051 /* XXX */
13052 if_setsendqlen(ifp, sc->tx_ring_size);
13053 if_setsendqready(ifp);
13054 /* XXX */
13055
13056 sc->ifp = ifp;
13057
13058 /* attach to the Ethernet interface list */
13059 ether_ifattach(ifp, sc->link_params.mac_addr);
13060
13061 /* Attach driver debugnet methods. */
13062 DEBUGNET_SET(ifp, bxe);
13063
13064 return (0);
13065 }
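/*
 * Usage note (illustrative, not from the original sources): the capability
 * and hardware-assist bits enabled above are what appear under "options"
 * and "capabilities" in ifconfig output for a bxe interface, and most can
 * be toggled from there, e.g. "ifconfig bxe0 -lro" to disable LRO.
 */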
13066
13067 static void
13068 bxe_deallocate_bars(struct bxe_softc *sc)
13069 {
13070 int i;
13071
13072 for (i = 0; i < MAX_BARS; i++) {
13073 if (sc->bar[i].resource != NULL) {
13074 bus_release_resource(sc->dev,
13075 SYS_RES_MEMORY,
13076 sc->bar[i].rid,
13077 sc->bar[i].resource);
13078 BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
13079 i, PCIR_BAR(i));
13080 }
13081 }
13082 }
13083
13084 static int
13085 bxe_allocate_bars(struct bxe_softc *sc)
13086 {
13087 u_int flags;
13088 int i;
13089
13090 memset(sc->bar, 0, sizeof(sc->bar));
13091
13092 for (i = 0; i < MAX_BARS; i++) {
13093
13094 /* memory resources reside at BARs 0, 2, 4 */
13095 /* Run `pciconf -lb` to see mappings */
13096 if ((i != 0) && (i != 2) && (i != 4)) {
13097 continue;
13098 }
13099
13100 sc->bar[i].rid = PCIR_BAR(i);
13101
13102 flags = RF_ACTIVE;
13103 if (i == 0) {
13104 flags |= RF_SHAREABLE;
13105 }
13106
13107 if ((sc->bar[i].resource =
13108 bus_alloc_resource_any(sc->dev,
13109 SYS_RES_MEMORY,
13110 &sc->bar[i].rid,
13111 flags)) == NULL) {
13112 return (-1);
13113 }
13114
13115 sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource);
13116 sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
13117 sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
13118
13119 BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %#jx-%#jx (%jd) -> %#jx\n",
13120 i, PCIR_BAR(i),
13121 rman_get_start(sc->bar[i].resource),
13122 rman_get_end(sc->bar[i].resource),
13123 rman_get_size(sc->bar[i].resource),
13124 (uintmax_t)sc->bar[i].kva);
13125 }
13126
13127 return (0);
13128 }
13129
13130 static void
13131 bxe_get_function_num(struct bxe_softc *sc)
13132 {
13133 uint32_t val = 0;
13134
13135 /*
13136 * Read the ME register to get the function number. The ME register
13137 * holds the relative-function number and absolute-function number. The
13138 * absolute-function number appears only in E2 and above. Before that
13139 * these bits always contained zero, therefore we cannot blindly use them.
13140 */
13141
13142 val = REG_RD(sc, BAR_ME_REGISTER);
13143
13144 sc->pfunc_rel =
13145 (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
13146 sc->path_id =
13147 (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
13148
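/*
 * In 4-port mode the absolute function number is (pfunc_rel << 1) | path_id;
 * otherwise the path bit is simply OR'd into the relative function number.
 */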
13149 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13150 sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
13151 } else {
13152 sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
13153 }
13154
13155 BLOGD(sc, DBG_LOAD,
13156 "Relative function %d, Absolute function %d, Path %d\n",
13157 sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
13158 }
13159
13160 static uint32_t
13161 bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
13162 {
13163 uint32_t shmem2_size;
13164 uint32_t offset;
13165 uint32_t mf_cfg_offset_value;
13166
13167 /* Non 57712 */
13168 offset = (SHMEM_RD(sc, func_mb) +
13169 (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
13170
13171 /* 57712 plus */
13172 if (sc->devinfo.shmem2_base != 0) {
13173 shmem2_size = SHMEM2_RD(sc, size);
13174 if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
13175 mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
13176 if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
13177 offset = mf_cfg_offset_value;
13178 }
13179 }
13180 }
13181
13182 return (offset);
13183 }
13184
13185 static uint32_t
13186 bxe_pcie_capability_read(struct bxe_softc *sc,
13187 int reg,
13188 int width)
13189 {
13190 int pcie_reg;
13191
13192 /* ensure PCIe capability is enabled */
13193 if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
13194 if (pcie_reg != 0) {
13195 BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
13196 return (pci_read_config(sc->dev, (pcie_reg + reg), width));
13197 }
13198 }
13199
13200 BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
13201
13202 return (0);
13203 }
13204
13205 static uint8_t
13206 bxe_is_pcie_pending(struct bxe_softc *sc)
13207 {
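/* true while the PCIe Device Status register reports Transactions Pending */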
13208 return (bxe_pcie_capability_read(sc, PCIER_DEVICE_STA, 2) &
13209 PCIEM_STA_TRANSACTION_PND);
13210 }
13211
13212 /*
13213 * Walk the PCI capabilities list for the device to find what features are
13214 * supported. These capabilities may be enabled/disabled by firmware so it's
13215 * best to walk the list rather than make assumptions.
13216 */
13217 static void
13218 bxe_probe_pci_caps(struct bxe_softc *sc)
13219 {
13220 uint16_t link_status;
13221 int reg;
13222
13223 /* check if PCI Power Management is enabled */
13224 if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
13225 if (reg != 0) {
13226 BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
13227
13228 sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
13229 sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
13230 }
13231 }
13232
13233 link_status = bxe_pcie_capability_read(sc, PCIER_LINK_STA, 2);
13234
13235 /* handle PCIe 2.0 workarounds for 57710 */
13236 if (CHIP_IS_E1(sc)) {
13237 /* workaround for 57710 errata E4_57710_27462 */
13238 sc->devinfo.pcie_link_speed =
13239 (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
13240
13241 /* workaround for 57710 errata E4_57710_27488 */
13242 sc->devinfo.pcie_link_width =
13243 ((link_status & PCIEM_LINK_STA_WIDTH) >> 4);
13244 if (sc->devinfo.pcie_link_speed > 1) {
13245 sc->devinfo.pcie_link_width =
13246 ((link_status & PCIEM_LINK_STA_WIDTH) >> 4) >> 1;
13247 }
13248 } else {
13249 sc->devinfo.pcie_link_speed =
13250 (link_status & PCIEM_LINK_STA_SPEED);
13251 sc->devinfo.pcie_link_width =
13252 ((link_status & PCIEM_LINK_STA_WIDTH) >> 4);
13253 }
13254
13255 BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
13256 sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
13257
13258 sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
13259 sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
13260
13261 /* check if MSI capability is enabled */
13262 if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
13263 if (reg != 0) {
13264 BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
13265
13266 sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
13267 sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
13268 }
13269 }
13270
13271 /* check if MSI-X capability is enabled */
13272 if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
13273 if (reg != 0) {
13274 BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
13275
13276 sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
13277 sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
13278 }
13279 }
13280 }
13281
13282 static int
13283 bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
13284 {
13285 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13286 uint32_t val;
13287
13288 /* get the outer vlan if we're in switch-dependent mode */
13289
13290 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13291 mf_info->ext_id = (uint16_t)val;
13292
13293 mf_info->multi_vnics_mode = 1;
13294
13295 if (!VALID_OVLAN(mf_info->ext_id)) {
13296 BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
13297 return (1);
13298 }
13299
13300 /* get the capabilities */
13301 if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13302 FUNC_MF_CFG_PROTOCOL_ISCSI) {
13303 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
13304 } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13305 FUNC_MF_CFG_PROTOCOL_FCOE) {
13306 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
13307 } else {
13308 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
13309 }
13310
13311 mf_info->vnics_per_port =
13312 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13313
13314 return (0);
13315 }
13316
13317 static uint32_t
13318 bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
13319 {
13320 uint32_t retval = 0;
13321 uint32_t val;
13322
13323 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13324
13325 if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
13326 if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
13327 retval |= MF_PROTO_SUPPORT_ETHERNET;
13328 }
13329 if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
13330 retval |= MF_PROTO_SUPPORT_ISCSI;
13331 }
13332 if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
13333 retval |= MF_PROTO_SUPPORT_FCOE;
13334 }
13335 }
13336
13337 return (retval);
13338 }
13339
13340 static int
13341 bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
13342 {
13343 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13344 uint32_t val;
13345
13346 /*
13347 * There is no outer vlan if we're in switch-independent mode.
13348 * If the mac is valid then assume multi-function.
13349 */
13350
13351 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13352
13353 mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13354
13355 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13356
13357 mf_info->vnics_per_port =
13358 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13359
13360 return (0);
13361 }
13362
13363 static int
13364 bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13365 {
13366 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13367 uint32_t e1hov_tag;
13368 uint32_t func_config;
13369 uint32_t niv_config;
13370
13371 mf_info->multi_vnics_mode = 1;
13372
13373 e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13374 func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13375 niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13376
13377 mf_info->ext_id =
13378 (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13379 FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13380
13381 mf_info->default_vlan =
13382 (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13383 FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13384
13385 mf_info->niv_allowed_priorities =
13386 (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13387 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13388
13389 mf_info->niv_default_cos =
13390 (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13391 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13392
13393 mf_info->afex_vlan_mode =
13394 ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13395 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13396
13397 mf_info->niv_mba_enabled =
13398 ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13399 FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13400
13401 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13402
13403 mf_info->vnics_per_port =
13404 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13405
13406 return (0);
13407 }
13408
13409 static int
13410 bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13411 {
13412 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13413 uint32_t mf_cfg1;
13414 uint32_t mf_cfg2;
13415 uint32_t ovlan1;
13416 uint32_t ovlan2;
13417 uint8_t i, j;
13418
13419 BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13420 SC_PORT(sc));
13421 BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13422 mf_info->mf_config[SC_VN(sc)]);
13423 BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13424 mf_info->multi_vnics_mode);
13425 BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13426 mf_info->vnics_per_port);
13427 BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13428 mf_info->ext_id);
13429 BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13430 mf_info->min_bw[0], mf_info->min_bw[1],
13431 mf_info->min_bw[2], mf_info->min_bw[3]);
13432 BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13433 mf_info->max_bw[0], mf_info->max_bw[1],
13434 mf_info->max_bw[2], mf_info->max_bw[3]);
13435 BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13436 sc->mac_addr_str);
13437
13438 /* various MF mode sanity checks... */
13439
13440 if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13441 BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13442 SC_PORT(sc));
13443 return (1);
13444 }
13445
13446 if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13447 BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13448 mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13449 return (1);
13450 }
13451
13452 if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13453 /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13454 if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13455 BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13456 SC_VN(sc), OVLAN(sc));
13457 return (1);
13458 }
13459
13460 if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13461 BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13462 mf_info->multi_vnics_mode, OVLAN(sc));
13463 return (1);
13464 }
13465
13466 /*
13467 * Verify all functions are either MF or SF mode. If MF, make
13468 * sure that all non-hidden functions have a valid ovlan. If SF,
13469 * make sure that all non-hidden functions have an invalid ovlan.
13470 */
13471 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13472 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13473 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13474 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13475 (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13476 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13477 BLOGE(sc, "mf_mode=SD function %d MF config "
13478 "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13479 i, mf_info->multi_vnics_mode, ovlan1);
13480 return (1);
13481 }
13482 }
13483
13484 /* Verify all funcs on the same port each have a different ovlan. */
13485 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13486 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13487 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13488 /* iterate from the next function on the port to the max func */
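/* absolute function numbers on the same port are spaced two apart */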
13489 for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13490 mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13491 ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13492 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13493 VALID_OVLAN(ovlan1) &&
13494 !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13495 VALID_OVLAN(ovlan2) &&
13496 (ovlan1 == ovlan2)) {
13497 BLOGE(sc, "mf_mode=SD functions %d and %d "
13498 "have the same ovlan (%d)\n",
13499 i, j, ovlan1);
13500 return (1);
13501 }
13502 }
13503 }
13504 } /* MULTI_FUNCTION_SD */
13505
13506 return (0);
13507 }
13508
13509 static int
13510 bxe_get_mf_cfg_info(struct bxe_softc *sc)
13511 {
13512 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13513 uint32_t val, mac_upper;
13514 uint8_t i, vnic;
13515
13516 /* initialize mf_info defaults */
13517 mf_info->vnics_per_port = 1;
13518 mf_info->multi_vnics_mode = FALSE;
13519 mf_info->path_has_ovlan = FALSE;
13520 mf_info->mf_mode = SINGLE_FUNCTION;
13521
13522 if (!CHIP_IS_MF_CAP(sc)) {
13523 return (0);
13524 }
13525
13526 if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13527 BLOGE(sc, "Invalid mf_cfg_base!\n");
13528 return (1);
13529 }
13530
13531 /* get the MF mode (switch dependent / independent / single-function) */
13532
13533 val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13534
13535 switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13536 {
13537 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13538
13539 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13540
13541 /* check for legal upper mac bytes */
13542 if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13543 mf_info->mf_mode = MULTI_FUNCTION_SI;
13544 } else {
13545 BLOGE(sc, "Invalid config for Switch Independent mode\n");
13546 }
13547
13548 break;
13549
13550 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13551 case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13552
13553 /* get outer vlan configuration */
13554 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13555
13556 if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13557 FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13558 mf_info->mf_mode = MULTI_FUNCTION_SD;
13559 } else {
13560 BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13561 }
13562
13563 break;
13564
13565 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13566
13567 /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13568 return (0);
13569
13570 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13571
13572 /*
13573 * Mark MF mode as NIV if MCP version includes NPAR-SD support
13574 * and the MAC address is valid.
13575 */
13576 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13577
13578 if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13579 (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13580 mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13581 } else {
13582 BLOGE(sc, "Invalid config for AFEX mode\n");
13583 }
13584
13585 break;
13586
13587 default:
13588
13589 BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13590 (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13591
13592 return (1);
13593 }
13594
13595 /* set path mf_mode (which could be different than function mf_mode) */
13596 if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13597 mf_info->path_has_ovlan = TRUE;
13598 } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13599 /*
13600 * Decide on the path multi vnics mode. If we're not in MF mode and
13601 * the chip is in 4-port mode, it is enough to check vnic-0 of the
13602 * other port on the same path.
13603 */
13604 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13605 uint8_t other_port = !(PORT_ID(sc) & 1);
13606 uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
13607
13608 val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13609
13610 mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13611 }
13612 }
13613
13614 if (mf_info->mf_mode == SINGLE_FUNCTION) {
13615 /* invalid MF config */
13616 if (SC_VN(sc) >= 1) {
13617 BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13618 return (1);
13619 }
13620
13621 return (0);
13622 }
13623
13624 /* get the MF configuration */
13625 mf_info->mf_config[SC_VN(sc)] =
13626 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13627
13628 switch(mf_info->mf_mode)
13629 {
13630 case MULTI_FUNCTION_SD:
13631
13632 bxe_get_shmem_mf_cfg_info_sd(sc);
13633 break;
13634
13635 case MULTI_FUNCTION_SI:
13636
13637 bxe_get_shmem_mf_cfg_info_si(sc);
13638 break;
13639
13640 case MULTI_FUNCTION_AFEX:
13641
13642 bxe_get_shmem_mf_cfg_info_niv(sc);
13643 break;
13644
13645 default:
13646
13647 BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13648 mf_info->mf_mode);
13649 return (1);
13650 }
13651
13652 /* get the congestion management parameters */
13653
13654 vnic = 0;
13655 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13656 /* get min/max bw */
13657 val = MFCFG_RD(sc, func_mf_config[i].config);
13658 mf_info->min_bw[vnic] =
13659 ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13660 mf_info->max_bw[vnic] =
13661 ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13662 vnic++;
13663 }
13664
13665 return (bxe_check_valid_mf_cfg(sc));
13666 }
13667
13668 static int
13669 bxe_get_shmem_info(struct bxe_softc *sc)
13670 {
13671 int port;
13672 uint32_t mac_hi, mac_lo, val;
13673
13674 port = SC_PORT(sc);
13675 mac_hi = mac_lo = 0;
13676
13677 sc->link_params.sc = sc;
13678 sc->link_params.port = port;
13679
13680 /* get the hardware config info */
13681 sc->devinfo.hw_config =
13682 SHMEM_RD(sc, dev_info.shared_hw_config.config);
13683 sc->devinfo.hw_config2 =
13684 SHMEM_RD(sc, dev_info.shared_hw_config.config2);
13685
13686 sc->link_params.hw_led_mode =
13687 ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
13688 SHARED_HW_CFG_LED_MODE_SHIFT);
13689
13690 /* get the port feature config */
13691 sc->port.config =
13692 SHMEM_RD(sc, dev_info.port_feature_config[port].config);
13693
13694 /* get the link params */
13695 sc->link_params.speed_cap_mask[0] =
13696 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
13697 sc->link_params.speed_cap_mask[1] =
13698 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
13699
13700 /* get the lane config */
13701 sc->link_params.lane_config =
13702 SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
13703
13704 /* get the link config */
13705 val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
13706 sc->port.link_config[ELINK_INT_PHY] = val;
13707 sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
13708 sc->port.link_config[ELINK_EXT_PHY1] =
13709 SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
13710
13711 /* get the override preemphasis flag and enable it or turn it off */
13712 val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13713 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
13714 sc->link_params.feature_config_flags |=
13715 ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13716 } else {
13717 sc->link_params.feature_config_flags &=
13718 ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13719 }
13720
13721 /* get the initial value of the link params */
13722 sc->link_params.multi_phy_config =
13723 SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
13724
13725 /* get external phy info */
13726 sc->port.ext_phy_config =
13727 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
13728
13729 /* get the multifunction configuration */
13730 bxe_get_mf_cfg_info(sc);
13731
13732 /* get the mac address */
13733 if (IS_MF(sc)) {
13734 mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13735 mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
13736 } else {
13737 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
13738 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
13739 }
13740
13741 if ((mac_lo == 0) && (mac_hi == 0)) {
13742 *sc->mac_addr_str = 0;
13743 BLOGE(sc, "No Ethernet address programmed!\n");
13744 } else {
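/* mac_hi holds the two most significant bytes of the MAC, mac_lo the remaining four */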
13745 sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
13746 sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
13747 sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
13748 sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
13749 sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
13750 sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
13751 snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
13752 "%02x:%02x:%02x:%02x:%02x:%02x",
13753 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
13754 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
13755 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
13756 BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
13757 }
13758
13759 return (0);
13760 }
13761
13762 static void
13763 bxe_get_tunable_params(struct bxe_softc *sc)
13764 {
13765 /* sanity checks */
13766
13767 if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
13768 (bxe_interrupt_mode != INTR_MODE_MSI) &&
13769 (bxe_interrupt_mode != INTR_MODE_MSIX)) {
13770 BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
13771 bxe_interrupt_mode = INTR_MODE_MSIX;
13772 }
13773
13774 if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
13775 BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
13776 bxe_queue_count = 0;
13777 }
13778
13779 if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
13780 if (bxe_max_rx_bufs == 0) {
13781 bxe_max_rx_bufs = RX_BD_USABLE;
13782 } else {
13783 BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
13784 bxe_max_rx_bufs = 2048;
13785 }
13786 }
13787
13788 if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
13789 BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
13790 bxe_hc_rx_ticks = 25;
13791 }
13792
13793 if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
13794 BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
13795 bxe_hc_tx_ticks = 50;
13796 }
13797
13798 if (bxe_max_aggregation_size == 0) {
13799 bxe_max_aggregation_size = TPA_AGG_SIZE;
13800 }
13801
13802 if (bxe_max_aggregation_size > 0xffff) {
13803 BLOGW(sc, "invalid max_aggregation_size (%d)\n",
13804 bxe_max_aggregation_size);
13805 bxe_max_aggregation_size = TPA_AGG_SIZE;
13806 }
13807
13808 if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
13809 BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
13810 bxe_mrrs = -1;
13811 }
13812
13813 if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
13814 BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
13815 bxe_autogreeen = 0;
13816 }
13817
13818 if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
13819 BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
13820 bxe_udp_rss = 0;
13821 }
13822
13823 /* pull in user settings */
13824
13825 sc->interrupt_mode = bxe_interrupt_mode;
13826 sc->max_rx_bufs = bxe_max_rx_bufs;
13827 sc->hc_rx_ticks = bxe_hc_rx_ticks;
13828 sc->hc_tx_ticks = bxe_hc_tx_ticks;
13829 sc->max_aggregation_size = bxe_max_aggregation_size;
13830 sc->mrrs = bxe_mrrs;
13831 sc->autogreeen = bxe_autogreeen;
13832 sc->udp_rss = bxe_udp_rss;
13833
13834 if (bxe_interrupt_mode == INTR_MODE_INTX) {
13835 sc->num_queues = 1;
13836 } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
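/* use the queue_count tunable if set, otherwise one queue per CPU, capped at MAX_RSS_CHAINS and mp_ncpus */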
13837 sc->num_queues =
13838 min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
13839 MAX_RSS_CHAINS);
13840 if (sc->num_queues > mp_ncpus) {
13841 sc->num_queues = mp_ncpus;
13842 }
13843 }
13844
13845 BLOGD(sc, DBG_LOAD,
13846 "User Config: "
13847 "debug=0x%lx "
13848 "interrupt_mode=%d "
13849 "queue_count=%d "
13850 "hc_rx_ticks=%d "
13851 "hc_tx_ticks=%d "
13852 "rx_budget=%d "
13853 "max_aggregation_size=%d "
13854 "mrrs=%d "
13855 "autogreeen=%d "
13856 "udp_rss=%d\n",
13857 bxe_debug,
13858 sc->interrupt_mode,
13859 sc->num_queues,
13860 sc->hc_rx_ticks,
13861 sc->hc_tx_ticks,
13862 bxe_rx_budget,
13863 sc->max_aggregation_size,
13864 sc->mrrs,
13865 sc->autogreeen,
13866 sc->udp_rss);
13867 }
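/*
 * Illustrative example (assumes the hw.bxe.* tunables documented in bxe(4)
 * are what seed the bxe_* variables sanity-checked above); typical settings
 * in /boot/loader.conf might look like:
 *
 *   hw.bxe.interrupt_mode=2   # MSI-X
 *   hw.bxe.queue_count=4
 *   hw.bxe.hc_rx_ticks=25
 *   hw.bxe.hc_tx_ticks=50
 */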
13868
13869 static int
13870 bxe_media_detect(struct bxe_softc *sc)
13871 {
13872 int port_type;
13873 uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
13874
13875 switch (sc->link_params.phy[phy_idx].media_type) {
13876 case ELINK_ETH_PHY_SFPP_10G_FIBER:
13877 case ELINK_ETH_PHY_XFP_FIBER:
13878 BLOGI(sc, "Found 10Gb Fiber media.\n");
13879 sc->media = IFM_10G_SR;
13880 port_type = PORT_FIBRE;
13881 break;
13882 case ELINK_ETH_PHY_SFP_1G_FIBER:
13883 BLOGI(sc, "Found 1Gb Fiber media.\n");
13884 sc->media = IFM_1000_SX;
13885 port_type = PORT_FIBRE;
13886 break;
13887 case ELINK_ETH_PHY_KR:
13888 case ELINK_ETH_PHY_CX4:
13889 BLOGI(sc, "Found 10GBase-CX4 media.\n");
13890 sc->media = IFM_10G_CX4;
13891 port_type = PORT_FIBRE;
13892 break;
13893 case ELINK_ETH_PHY_DA_TWINAX:
13894 BLOGI(sc, "Found 10Gb Twinax media.\n");
13895 sc->media = IFM_10G_TWINAX;
13896 port_type = PORT_DA;
13897 break;
13898 case ELINK_ETH_PHY_BASE_T:
13899 if (sc->link_params.speed_cap_mask[0] &
13900 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
13901 BLOGI(sc, "Found 10GBase-T media.\n");
13902 sc->media = IFM_10G_T;
13903 port_type = PORT_TP;
13904 } else {
13905 BLOGI(sc, "Found 1000Base-T media.\n");
13906 sc->media = IFM_1000_T;
13907 port_type = PORT_TP;
13908 }
13909 break;
13910 case ELINK_ETH_PHY_NOT_PRESENT:
13911 BLOGI(sc, "Media not present.\n");
13912 sc->media = 0;
13913 port_type = PORT_OTHER;
13914 break;
13915 case ELINK_ETH_PHY_UNSPECIFIED:
13916 default:
13917 BLOGI(sc, "Unknown media!\n");
13918 sc->media = 0;
13919 port_type = PORT_OTHER;
13920 break;
13921 }
13922 return port_type;
13923 }
13924
13925 #define GET_FIELD(value, fname) \
13926 (((value) & (fname##_MASK)) >> (fname##_SHIFT))
13927 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
13928 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
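/*
 * Each IGU CAM entry encodes the owning function (FID) and the vector
 * index within that function (VEC); they are used below to discover the
 * status blocks assigned to this PF.
 */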
13929
13930 static int
13931 bxe_get_igu_cam_info(struct bxe_softc *sc)
13932 {
13933 int pfid = SC_FUNC(sc);
13934 int igu_sb_id;
13935 uint32_t val;
13936 uint8_t fid, igu_sb_cnt = 0;
13937
13938 sc->igu_base_sb = 0xff;
13939
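/*
 * In backward-compatible interrupt mode the SB ids follow the fixed HC
 * layout, so they are computed here rather than read from the IGU CAM.
 */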
13940 if (CHIP_INT_MODE_IS_BC(sc)) {
13941 int vn = SC_VN(sc);
13942 igu_sb_cnt = sc->igu_sb_cnt;
13943 sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
13944 FP_SB_MAX_E1x);
13945 sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
13946 (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
13947 return (0);
13948 }
13949
13950 /* IGU in normal mode - read CAM */
13951 for (igu_sb_id = 0;
13952 igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
13953 igu_sb_id++) {
13954 val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
13955 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
13956 continue;
13957 }
13958 fid = IGU_FID(val);
13959 if ((fid & IGU_FID_ENCODE_IS_PF)) {
13960 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
13961 continue;
13962 }
13963 if (IGU_VEC(val) == 0) {
13964 /* default status block */
13965 sc->igu_dsb_id = igu_sb_id;
13966 } else {
13967 if (sc->igu_base_sb == 0xff) {
13968 sc->igu_base_sb = igu_sb_id;
13969 }
13970 igu_sb_cnt++;
13971 }
13972 }
13973 }
13974
13975 /*
13976 * Due to the new PF resource allocation by MFW T7.4 and above, the
13977 * number of CAM entries may not be equal to the value advertised in
13978 * PCI config space. The driver should use the minimum of the two as
13979 * the actual status block count.
13980 */
13981 sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
13982
13983 if (igu_sb_cnt == 0) {
13984 BLOGE(sc, "CAM configuration error\n");
13985 return (-1);
13986 }
13987
13988 return (0);
13989 }
13990
13991 /*
13992 * Gather various information from the device config space, the device itself,
13993 * shmem, and the user input.
13994 */
13995 static int
13996 bxe_get_device_info(struct bxe_softc *sc)
13997 {
13998 uint32_t val;
13999 int rc;
14000
14001 /* Get the data for the device */
14002 sc->devinfo.vendor_id = pci_get_vendor(sc->dev);
14003 sc->devinfo.device_id = pci_get_device(sc->dev);
14004 sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
14005 sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
14006
14007 /* get the chip revision (chip metal comes from pci config space) */
14008 sc->devinfo.chip_id =
14009 sc->link_params.chip_id =
14010 (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) |
14011 ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) |
14012 (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) |
14013 ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0));
14014
14015 /* force 57811 according to MISC register */
14016 if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
14017 if (CHIP_IS_57810(sc)) {
14018 sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
14019 (sc->devinfo.chip_id & 0x0000ffff));
14020 } else if (CHIP_IS_57810_MF(sc)) {
14021 sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
14022 (sc->devinfo.chip_id & 0x0000ffff));
14023 }
14024 sc->devinfo.chip_id |= 0x1;
14025 }
14026
14027 BLOGD(sc, DBG_LOAD,
14028 "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
14029 sc->devinfo.chip_id,
14030 ((sc->devinfo.chip_id >> 16) & 0xffff),
14031 ((sc->devinfo.chip_id >> 12) & 0xf),
14032 ((sc->devinfo.chip_id >> 4) & 0xff),
14033 ((sc->devinfo.chip_id >> 0) & 0xf));
14034
14035 val = (REG_RD(sc, 0x2874) & 0x55);
14036 if ((sc->devinfo.chip_id & 0x1) ||
14037 (CHIP_IS_E1(sc) && val) ||
14038 (CHIP_IS_E1H(sc) && (val == 0x55))) {
14039 sc->flags |= BXE_ONE_PORT_FLAG;
14040 BLOGD(sc, DBG_LOAD, "single port device\n");
14041 }
14042
14043 /* set the doorbell size */
14044 sc->doorbell_size = (1 << BXE_DB_SHIFT);
14045
14046 /* determine whether the device is in 2 port or 4 port mode */
14047 sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/
14048 if (CHIP_IS_E2E3(sc)) {
14049 /*
14050 * Read port4mode_en_ovwr[0]:
14051 * If 1, four port mode is in port4mode_en_ovwr[1].
14052 * If 0, four port mode is in port4mode_en[0].
14053 */
14054 val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
14055 if (val & 1) {
14056 val = ((val >> 1) & 1);
14057 } else {
14058 val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
14059 }
14060
14061 sc->devinfo.chip_port_mode =
14062 (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
14063
14064 BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
14065 }
14066
14067 /* get the function and path info for the device */
14068 bxe_get_function_num(sc);
14069
14070 /* get the shared memory base address */
14071 sc->devinfo.shmem_base =
14072 sc->link_params.shmem_base =
14073 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
14074 sc->devinfo.shmem2_base =
14075 REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
14076 MISC_REG_GENERIC_CR_0));
14077
14078 BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
14079 sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
14080
14081 if (!sc->devinfo.shmem_base) {
14082 /* this should ONLY prevent upcoming shmem reads */
14083 BLOGI(sc, "MCP not active\n");
14084 sc->flags |= BXE_NO_MCP_FLAG;
14085 return (0);
14086 }
14087
14088 /* make sure the shared memory contents are valid */
14089 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
14090 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
14091 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
14092 BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
14093 return (0);
14094 }
14095 BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
14096
14097 /* get the bootcode version */
14098 sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
14099 snprintf(sc->devinfo.bc_ver_str,
14100 sizeof(sc->devinfo.bc_ver_str),
14101 "%d.%d.%d",
14102 ((sc->devinfo.bc_ver >> 24) & 0xff),
14103 ((sc->devinfo.bc_ver >> 16) & 0xff),
14104 ((sc->devinfo.bc_ver >> 8) & 0xff));
14105 BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
14106
14107 /* get the bootcode shmem address */
14108 sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
14109 BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x08%x \n", sc->devinfo.mf_cfg_base);
14110
14111 /* clean indirect addresses as they're not used */
14112 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
14113 if (IS_PF(sc)) {
14114 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
14115 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
14116 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
14117 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
14118 if (CHIP_IS_E1x(sc)) {
14119 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
14120 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
14121 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
14122 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
14123 }
14124
14125 /*
14126 * Enable internal target-read (in case we are probed after PF
14127 * FLR). Must be done prior to any BAR read access. Only for
14128 * 57712 and up
14129 */
14130 if (!CHIP_IS_E1x(sc)) {
14131 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
14132 }
14133 }
14134
14135 /* get the nvram size */
14136 val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
14137 sc->devinfo.flash_size =
14138 (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
14139 BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
14140
14141 /* get PCI capabilites */
14142 bxe_probe_pci_caps(sc);
14143
14144 bxe_set_power_state(sc, PCI_PM_D0);
14145
14146 /* get various configuration parameters from shmem */
14147 bxe_get_shmem_info(sc);
14148
14149 if (sc->devinfo.pcie_msix_cap_reg != 0) {
14150 val = pci_read_config(sc->dev,
14151 (sc->devinfo.pcie_msix_cap_reg +
14152 PCIR_MSIX_CTRL),
14153 2);
14154 sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
14155 } else {
14156 sc->igu_sb_cnt = 1;
14157 }
14158
14159 sc->igu_base_addr = BAR_IGU_INTMEM;
14160
14161 /* initialize IGU parameters */
14162 if (CHIP_IS_E1x(sc)) {
14163 sc->devinfo.int_block = INT_BLOCK_HC;
14164 sc->igu_dsb_id = DEF_SB_IGU_ID;
14165 sc->igu_base_sb = 0;
14166 } else {
14167 sc->devinfo.int_block = INT_BLOCK_IGU;
14168
14169 /* do not allow device reset during IGU info processing */
14170 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14171
14172 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
14173
14174 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14175 int tout = 5000;
14176
14177 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
14178
14179 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
14180 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
14181 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
14182
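/* poll up to ~5 seconds (5000 x 1ms) for the IGU memories to finish resetting */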
14183 while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14184 tout--;
14185 DELAY(1000);
14186 }
14187
14188 if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14189 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
14190 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14191 return (-1);
14192 }
14193 }
14194
14195 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14196 BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
14197 sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
14198 } else {
14199 BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
14200 }
14201
14202 rc = bxe_get_igu_cam_info(sc);
14203
14204 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14205
14206 if (rc) {
14207 return (rc);
14208 }
14209 }
14210
14211 /*
14212 * Get base FW non-default (fast path) status block ID. This value is
14213 * used to initialize the fw_sb_id saved on the fp/queue structure to
14214 * determine the id used by the FW.
14215 */
14216 if (CHIP_IS_E1x(sc)) {
14217 sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
14218 } else {
14219 /*
14220 * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
14221 * the same queue are indicated on the same IGU SB). So we prefer
14222 * FW and IGU SBs to be the same value.
14223 */
14224 sc->base_fw_ndsb = sc->igu_base_sb;
14225 }
14226
14227 BLOGD(sc, DBG_LOAD,
14228 "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
14229 sc->igu_dsb_id, sc->igu_base_sb,
14230 sc->igu_sb_cnt, sc->base_fw_ndsb);
14231
14232 elink_phy_probe(&sc->link_params);
14233
14234 return (0);
14235 }
14236
14237 static void
14238 bxe_link_settings_supported(struct bxe_softc *sc,
14239 uint32_t switch_cfg)
14240 {
14241 uint32_t cfg_size = 0;
14242 uint32_t idx;
14243 uint8_t port = SC_PORT(sc);
14244
14245 /* aggregation of supported attributes of all external phys */
14246 sc->port.supported[0] = 0;
14247 sc->port.supported[1] = 0;
14248
14249 switch (sc->link_params.num_phys) {
14250 case 1:
14251 sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
14252 cfg_size = 1;
14253 break;
14254 case 2:
14255 sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
14256 cfg_size = 1;
14257 break;
14258 case 3:
14259 if (sc->link_params.multi_phy_config &
14260 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
14261 sc->port.supported[1] =
14262 sc->link_params.phy[ELINK_EXT_PHY1].supported;
14263 sc->port.supported[0] =
14264 sc->link_params.phy[ELINK_EXT_PHY2].supported;
14265 } else {
14266 sc->port.supported[0] =
14267 sc->link_params.phy[ELINK_EXT_PHY1].supported;
14268 sc->port.supported[1] =
14269 sc->link_params.phy[ELINK_EXT_PHY2].supported;
14270 }
14271 cfg_size = 2;
14272 break;
14273 }
14274
14275 if (!(sc->port.supported[0] || sc->port.supported[1])) {
14276 BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
14277 SHMEM_RD(sc,
14278 dev_info.port_hw_config[port].external_phy_config),
14279 SHMEM_RD(sc,
14280 dev_info.port_hw_config[port].external_phy_config2));
14281 return;
14282 }
14283
14284 if (CHIP_IS_E3(sc))
14285 sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
14286 else {
14287 switch (switch_cfg) {
14288 case ELINK_SWITCH_CFG_1G:
14289 sc->port.phy_addr =
14290 REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
14291 break;
14292 case ELINK_SWITCH_CFG_10G:
14293 sc->port.phy_addr =
14294 REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
14295 break;
14296 default:
14297 BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
14298 sc->port.link_config[0]);
14299 return;
14300 }
14301 }
14302
14303 BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
14304
14305 /* mask what we support according to speed_cap_mask per configuration */
14306 for (idx = 0; idx < cfg_size; idx++) {
14307 if (!(sc->link_params.speed_cap_mask[idx] &
14308 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
14309 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
14310 }
14311
14312 if (!(sc->link_params.speed_cap_mask[idx] &
14313 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
14314 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
14315 }
14316
14317 if (!(sc->link_params.speed_cap_mask[idx] &
14318 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
14319 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14320 }
14321
14322 if (!(sc->link_params.speed_cap_mask[idx] &
14323 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
14324 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14325 }
14326
14327 if (!(sc->link_params.speed_cap_mask[idx] &
14328 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
14329 sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14330 }
14331
14332 if (!(sc->link_params.speed_cap_mask[idx] &
14333 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
14334 sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14335 }
14336
14337 if (!(sc->link_params.speed_cap_mask[idx] &
14338 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
14339 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14340 }
14341
14342 if (!(sc->link_params.speed_cap_mask[idx] &
14343 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
14344 sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
14345 }
14346 }
14347
14348 BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
14349 sc->port.supported[0], sc->port.supported[1]);
14350 ELINK_DEBUG_P2(sc, "PHY supported 0=0x%08x 1=0x%08x\n",
14351 sc->port.supported[0], sc->port.supported[1]);
14352 }
14353
14354 static void
14355 bxe_link_settings_requested(struct bxe_softc *sc)
14356 {
14357 uint32_t link_config;
14358 uint32_t idx;
14359 uint32_t cfg_size = 0;
14360
14361 sc->port.advertising[0] = 0;
14362 sc->port.advertising[1] = 0;
14363
14364 switch (sc->link_params.num_phys) {
14365 case 1:
14366 case 2:
14367 cfg_size = 1;
14368 break;
14369 case 3:
14370 cfg_size = 2;
14371 break;
14372 }
14373
14374 for (idx = 0; idx < cfg_size; idx++) {
14375 sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14376 link_config = sc->port.link_config[idx];
14377
14378 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14379 case PORT_FEATURE_LINK_SPEED_AUTO:
14380 if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14381 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14382 sc->port.advertising[idx] |= sc->port.supported[idx];
14383 if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14384 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14385 sc->port.advertising[idx] |=
14386 (ELINK_SUPPORTED_100baseT_Half |
14387 ELINK_SUPPORTED_100baseT_Full);
14388 } else {
14389 /* force 10G, no AN */
14390 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14391 sc->port.advertising[idx] |=
14392 (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14393 continue;
14394 }
14395 break;
14396
14397 case PORT_FEATURE_LINK_SPEED_10M_FULL:
14398 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14399 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14400 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14401 ADVERTISED_TP);
14402 } else {
14403 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14404 "speed_cap_mask=0x%08x\n",
14405 link_config, sc->link_params.speed_cap_mask[idx]);
14406 return;
14407 }
14408 break;
14409
14410 case PORT_FEATURE_LINK_SPEED_10M_HALF:
14411 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14412 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14413 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14414 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14415 ADVERTISED_TP);
14416 ELINK_DEBUG_P1(sc, "driver requesting DUPLEX_HALF req_duplex = %x!\n",
14417 sc->link_params.req_duplex[idx]);
14418 } else {
14419 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14420 "speed_cap_mask=0x%08x\n",
14421 link_config, sc->link_params.speed_cap_mask[idx]);
14422 return;
14423 }
14424 break;
14425
14426 case PORT_FEATURE_LINK_SPEED_100M_FULL:
14427 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14428 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14429 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14430 ADVERTISED_TP);
14431 } else {
14432 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14433 "speed_cap_mask=0x%08x\n",
14434 link_config, sc->link_params.speed_cap_mask[idx]);
14435 return;
14436 }
14437 break;
14438
14439 case PORT_FEATURE_LINK_SPEED_100M_HALF:
14440 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14441 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14442 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14443 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14444 ADVERTISED_TP);
14445 } else {
14446 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14447 "speed_cap_mask=0x%08x\n",
14448 link_config, sc->link_params.speed_cap_mask[idx]);
14449 return;
14450 }
14451 break;
14452
14453 case PORT_FEATURE_LINK_SPEED_1G:
14454 if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14455 sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14456 sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14457 ADVERTISED_TP);
14458 } else {
14459 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14460 "speed_cap_mask=0x%08x\n",
14461 link_config, sc->link_params.speed_cap_mask[idx]);
14462 return;
14463 }
14464 break;
14465
14466 case PORT_FEATURE_LINK_SPEED_2_5G:
14467 if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14468 sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14469 sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14470 ADVERTISED_TP);
14471 } else {
14472 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14473 "speed_cap_mask=0x%08x\n",
14474 link_config, sc->link_params.speed_cap_mask[idx]);
14475 return;
14476 }
14477 break;
14478
14479 case PORT_FEATURE_LINK_SPEED_10G_CX4:
14480 if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14481 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14482 sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14483 ADVERTISED_FIBRE);
14484 } else {
14485 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14486 "speed_cap_mask=0x%08x\n",
14487 link_config, sc->link_params.speed_cap_mask[idx]);
14488 return;
14489 }
14490 break;
14491
14492 case PORT_FEATURE_LINK_SPEED_20G:
14493 sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14494 break;
14495
14496 default:
14497 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14498 "speed_cap_mask=0x%08x\n",
14499 link_config, sc->link_params.speed_cap_mask[idx]);
14500 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14501 sc->port.advertising[idx] = sc->port.supported[idx];
14502 break;
14503 }
14504
14505 sc->link_params.req_flow_ctrl[idx] =
14506 (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14507
14508 if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14509 if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14510 sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14511 } else {
14512 bxe_set_requested_fc(sc);
14513 }
14514 }
14515
14516 BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14517 "req_flow_ctrl=0x%x advertising=0x%x\n",
14518 sc->link_params.req_line_speed[idx],
14519 sc->link_params.req_duplex[idx],
14520 sc->link_params.req_flow_ctrl[idx],
14521 sc->port.advertising[idx]);
14522 ELINK_DEBUG_P3(sc, "req_line_speed=%d req_duplex=%d "
14523 "advertising=0x%x\n",
14524 sc->link_params.req_line_speed[idx],
14525 sc->link_params.req_duplex[idx],
14526 sc->port.advertising[idx]);
14527 }
14528 }
14529
14530 static void
14531 bxe_get_phy_info(struct bxe_softc *sc)
14532 {
14533 uint8_t port = SC_PORT(sc);
14534 uint32_t config = sc->port.config;
14535 uint32_t eee_mode;
14536
14537 /* shmem data already read in bxe_get_shmem_info() */
14538
14539 ELINK_DEBUG_P3(sc, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14540 "link_config0=0x%08x\n",
14541 sc->link_params.lane_config,
14542 sc->link_params.speed_cap_mask[0],
14543 sc->port.link_config[0]);
14544
14545
14546 bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14547 bxe_link_settings_requested(sc);
14548
14549 if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14550 sc->link_params.feature_config_flags |=
14551 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14552 } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14553 sc->link_params.feature_config_flags &=
14554 ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14555 } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14556 sc->link_params.feature_config_flags |=
14557 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14558 }
14559
14560 /* configure link feature according to nvram value */
14561 eee_mode =
14562 (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14563 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14564 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14565 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14566 sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14567 ELINK_EEE_MODE_ENABLE_LPI |
14568 ELINK_EEE_MODE_OUTPUT_TIME);
14569 } else {
14570 sc->link_params.eee_mode = 0;
14571 }
14572
14573 /* get the media type */
14574 bxe_media_detect(sc);
14575 ELINK_DEBUG_P1(sc, "detected media type 0x%x\n", sc->media);
14576 }
14577
14578 static void
14579 bxe_get_params(struct bxe_softc *sc)
14580 {
14581 /* get user tunable params */
14582 bxe_get_tunable_params(sc);
14583
14584 /* select the RX and TX ring sizes */
14585 sc->tx_ring_size = TX_BD_USABLE;
14586 sc->rx_ring_size = RX_BD_USABLE;
14587
14588 /* XXX disable WoL */
14589 sc->wol = 0;
14590 }
14591
14592 static void
14593 bxe_set_modes_bitmap(struct bxe_softc *sc)
14594 {
14595 uint32_t flags = 0;
14596
14597 if (CHIP_REV_IS_FPGA(sc)) {
14598 SET_FLAGS(flags, MODE_FPGA);
14599 } else if (CHIP_REV_IS_EMUL(sc)) {
14600 SET_FLAGS(flags, MODE_EMUL);
14601 } else {
14602 SET_FLAGS(flags, MODE_ASIC);
14603 }
14604
14605 if (CHIP_IS_MODE_4_PORT(sc)) {
14606 SET_FLAGS(flags, MODE_PORT4);
14607 } else {
14608 SET_FLAGS(flags, MODE_PORT2);
14609 }
14610
14611 if (CHIP_IS_E2(sc)) {
14612 SET_FLAGS(flags, MODE_E2);
14613 } else if (CHIP_IS_E3(sc)) {
14614 SET_FLAGS(flags, MODE_E3);
14615 if (CHIP_REV(sc) == CHIP_REV_Ax) {
14616 SET_FLAGS(flags, MODE_E3_A0);
14617 } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14618 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14619 }
14620 }
14621
14622 if (IS_MF(sc)) {
14623 SET_FLAGS(flags, MODE_MF);
14624 switch (sc->devinfo.mf_info.mf_mode) {
14625 case MULTI_FUNCTION_SD:
14626 SET_FLAGS(flags, MODE_MF_SD);
14627 break;
14628 case MULTI_FUNCTION_SI:
14629 SET_FLAGS(flags, MODE_MF_SI);
14630 break;
14631 case MULTI_FUNCTION_AFEX:
14632 SET_FLAGS(flags, MODE_MF_AFEX);
14633 break;
14634 }
14635 } else {
14636 SET_FLAGS(flags, MODE_SF);
14637 }
14638
14639 #if defined(__LITTLE_ENDIAN)
14640 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14641 #else /* __BIG_ENDIAN */
14642 SET_FLAGS(flags, MODE_BIG_ENDIAN);
14643 #endif
14644
14645 INIT_MODE_FLAGS(sc) = flags;
14646 }
14647
14648 static int
14649 bxe_alloc_hsi_mem(struct bxe_softc *sc)
14650 {
14651 struct bxe_fastpath *fp;
14652 bus_addr_t busaddr;
14653 int max_agg_queues;
14654 int max_segments;
14655 bus_size_t max_size;
14656 bus_size_t max_seg_size;
14657 char buf[32];
14658 int rc;
14659 int i, j;
14660
14661 /* XXX zero out all vars here and call bxe_free_hsi_mem on error */
14662
14663 /* allocate the parent bus DMA tag */
14664 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14665 1, /* alignment */
14666 0, /* boundary limit */
14667 BUS_SPACE_MAXADDR, /* restricted low */
14668 BUS_SPACE_MAXADDR, /* restricted hi */
14669 NULL, /* addr filter() */
14670 NULL, /* addr filter() arg */
14671 BUS_SPACE_MAXSIZE_32BIT, /* max map size */
14672 BUS_SPACE_UNRESTRICTED, /* num discontinuous */
14673 BUS_SPACE_MAXSIZE_32BIT, /* max seg size */
14674 0, /* flags */
14675 NULL, /* lock() */
14676 NULL, /* lock() arg */
14677 &sc->parent_dma_tag); /* returned dma tag */
14678 if (rc != 0) {
14679 BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
14680 return (1);
14681 }
14682
14683 /************************/
14684 /* DEFAULT STATUS BLOCK */
14685 /************************/
14686
14687 if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
14688 &sc->def_sb_dma, "default status block") != 0) {
14689 /* XXX */
14690 bus_dma_tag_destroy(sc->parent_dma_tag);
14691 return (1);
14692 }
14693
14694 sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
14695
14696 /***************/
14697 /* EVENT QUEUE */
14698 /***************/
14699
14700 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14701 &sc->eq_dma, "event queue") != 0) {
14702 /* XXX */
14703 bxe_dma_free(sc, &sc->def_sb_dma);
14704 sc->def_sb = NULL;
14705 bus_dma_tag_destroy(sc->parent_dma_tag);
14706 return (1);
14707 }
14708
14709 sc->eq = (union event_ring_elem *)sc->eq_dma.vaddr;
14710
14711 /*************/
14712 /* SLOW PATH */
14713 /*************/
14714
14715 if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
14716 &sc->sp_dma, "slow path") != 0) {
14717 /* XXX */
14718 bxe_dma_free(sc, &sc->eq_dma);
14719 sc->eq = NULL;
14720 bxe_dma_free(sc, &sc->def_sb_dma);
14721 sc->def_sb = NULL;
14722 bus_dma_tag_destroy(sc->parent_dma_tag);
14723 return (1);
14724 }
14725
14726 sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
14727
14728 /*******************/
14729 /* SLOW PATH QUEUE */
14730 /*******************/
14731
14732 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14733 &sc->spq_dma, "slow path queue") != 0) {
14734 /* XXX */
14735 bxe_dma_free(sc, &sc->sp_dma);
14736 sc->sp = NULL;
14737 bxe_dma_free(sc, &sc->eq_dma);
14738 sc->eq = NULL;
14739 bxe_dma_free(sc, &sc->def_sb_dma);
14740 sc->def_sb = NULL;
14741 bus_dma_tag_destroy(sc->parent_dma_tag);
14742 return (1);
14743 }
14744
14745 sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
14746
14747 /***************************/
14748 /* FW DECOMPRESSION BUFFER */
14749 /***************************/
14750
14751 if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
14752 "fw decompression buffer") != 0) {
14753 /* XXX */
14754 bxe_dma_free(sc, &sc->spq_dma);
14755 sc->spq = NULL;
14756 bxe_dma_free(sc, &sc->sp_dma);
14757 sc->sp = NULL;
14758 bxe_dma_free(sc, &sc->eq_dma);
14759 sc->eq = NULL;
14760 bxe_dma_free(sc, &sc->def_sb_dma);
14761 sc->def_sb = NULL;
14762 bus_dma_tag_destroy(sc->parent_dma_tag);
14763 return (1);
14764 }
14765
14766 sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
14767
14768 if ((sc->gz_strm =
14769 malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
14770 /* XXX */
14771 bxe_dma_free(sc, &sc->gz_buf_dma);
14772 sc->gz_buf = NULL;
14773 bxe_dma_free(sc, &sc->spq_dma);
14774 sc->spq = NULL;
14775 bxe_dma_free(sc, &sc->sp_dma);
14776 sc->sp = NULL;
14777 bxe_dma_free(sc, &sc->eq_dma);
14778 sc->eq = NULL;
14779 bxe_dma_free(sc, &sc->def_sb_dma);
14780 sc->def_sb = NULL;
14781 bus_dma_tag_destroy(sc->parent_dma_tag);
14782 return (1);
14783 }
14784
14785 /*************/
14786 /* FASTPATHS */
14787 /*************/
14788
14789 /* allocate DMA memory for each fastpath structure */
14790 for (i = 0; i < sc->num_queues; i++) {
14791 fp = &sc->fp[i];
14792 fp->sc = sc;
14793 fp->index = i;
14794
14795 /*******************/
14796 /* FP STATUS BLOCK */
14797 /*******************/
14798
14799 snprintf(buf, sizeof(buf), "fp %d status block", i);
14800 if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
14801 &fp->sb_dma, buf) != 0) {
14802 /* XXX unwind and free previous fastpath allocations */
14803 BLOGE(sc, "Failed to alloc %s\n", buf);
14804 return (1);
14805 } else {
14806 if (CHIP_IS_E2E3(sc)) {
14807 fp->status_block.e2_sb =
14808 (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
14809 } else {
14810 fp->status_block.e1x_sb =
14811 (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
14812 }
14813 }
14814
14815 /******************/
14816 /* FP TX BD CHAIN */
14817 /******************/
14818
14819 snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
14820 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
14821 &fp->tx_dma, buf) != 0) {
14822 /* XXX unwind and free previous fastpath allocations */
14823 BLOGE(sc, "Failed to alloc %s\n", buf);
14824 return (1);
14825 } else {
14826 fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
14827 }
14828
14829 /* link together the tx bd chain pages */
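/*
 * The last entry of each BD page is a next_bd pointer. For page j it is
 * loaded with the bus address of page (j % TX_BD_NUM_PAGES), so the last
 * page points back to the first and the chain forms a ring.
 */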
14830 for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
14831 /* index into the tx bd chain array to last entry per page */
14832 struct eth_tx_next_bd *tx_next_bd =
14833 &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
14834 /* point to the next page and wrap from last page */
14835 busaddr = (fp->tx_dma.paddr +
14836 (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
14837 tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
14838 tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
14839 }
14840
14841 /******************/
14842 /* FP RX BD CHAIN */
14843 /******************/
14844
14845 snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
14846 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
14847 &fp->rx_dma, buf) != 0) {
14848 /* XXX unwind and free previous fastpath allocations */
14849 BLOGE(sc, "Failed to alloc %s\n", buf);
14850 return (1);
14851 } else {
14852 fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
14853 }
14854
14855 /* link together the rx bd chain pages */
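/*
 * Same ring-chaining scheme as the TX BD chain, except the next-page
 * pointer is written into the BD two slots from the end of each page
 * (hence the "- 2" index below); the last page wraps back to the first.
 */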
14856 for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
14857 /* index into the rx bd chain array to last entry per page */
14858 struct eth_rx_bd *rx_bd =
14859 &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
14860 /* point to the next page and wrap from last page */
14861 busaddr = (fp->rx_dma.paddr +
14862 (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
14863 rx_bd->addr_hi = htole32(U64_HI(busaddr));
14864 rx_bd->addr_lo = htole32(U64_LO(busaddr));
14865 }
14866
14867 /*******************/
14868 /* FP RX RCQ CHAIN */
14869 /*******************/
14870
14871 snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
14872 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
14873 &fp->rcq_dma, buf) != 0) {
14874 /* XXX unwind and free previous fastpath allocations */
14875 BLOGE(sc, "Failed to alloc %s\n", buf);
14876 return (1);
14877 } else {
14878 fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
14879 }
14880
14881 /* link together the rcq chain pages */
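/*
 * The RCQ pages are chained the same way: the last CQE slot of each page
 * is overlaid with an eth_rx_cqe_next_page entry pointing at the next
 * page (wrapping from the last page back to the first).
 */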
14882 for (j = 1; j <= RCQ_NUM_PAGES; j++) {
14883 /* index into the rcq chain array to last entry per page */
14884 struct eth_rx_cqe_next_page *rx_cqe_next =
14885 (struct eth_rx_cqe_next_page *)
14886 &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
14887 /* point to the next page and wrap from last page */
14888 busaddr = (fp->rcq_dma.paddr +
14889 (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
14890 rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
14891 rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
14892 }
14893
14894 /*******************/
14895 /* FP RX SGE CHAIN */
14896 /*******************/
14897
14898 snprintf(buf, sizeof(buf), "fp %d sge chain", i);
14899 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
14900 &fp->rx_sge_dma, buf) != 0) {
14901 /* XXX unwind and free previous fastpath allocations */
14902 BLOGE(sc, "Failed to alloc %s\n", buf);
14903 return (1);
14904 } else {
14905 fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14906 }
14907
14908 /* link together the sge chain pages */
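/*
 * The SGE pages follow the RX BD convention: the next-page pointer goes
 * into the entry two slots from the end of each page and the last page
 * wraps around to the first.
 */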
14909 for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
14910 /* index into the rx sge chain array to last entry per page */
14911 struct eth_rx_sge *rx_sge =
14912 &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14913 /* point to the next page and wrap from last page */
14914 busaddr = (fp->rx_sge_dma.paddr +
14915 (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
14916 rx_sge->addr_hi = htole32(U64_HI(busaddr));
14917 rx_sge->addr_lo = htole32(U64_LO(busaddr));
14918 }
14919
14920 /***********************/
14921 /* FP TX MBUF DMA MAPS */
14922 /***********************/
14923
14924 /* set required sizes before mapping to conserve resources */
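/*
 * With TSO enabled a single transmit may be far larger than one frame and
 * span many more DMA segments, so the TSO limits are used for the tag;
 * otherwise a map is bounded by BXE_MAX_SEGMENTS clusters of MCLBYTES each.
 */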
14925 if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
14926 max_size = BXE_TSO_MAX_SIZE;
14927 max_segments = BXE_TSO_MAX_SEGMENTS;
14928 max_seg_size = BXE_TSO_MAX_SEG_SIZE;
14929 } else {
14930 max_size = (MCLBYTES * BXE_MAX_SEGMENTS);
14931 max_segments = BXE_MAX_SEGMENTS;
14932 max_seg_size = MCLBYTES;
14933 }
14934
14935 /* create a dma tag for the tx mbufs */
14936 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14937 1, /* alignment */
14938 0, /* boundary limit */
14939 BUS_SPACE_MAXADDR, /* restricted low */
14940 BUS_SPACE_MAXADDR, /* restricted hi */
14941 NULL, /* addr filter() */
14942 NULL, /* addr filter() arg */
14943 max_size, /* max map size */
14944 max_segments, /* num discontinuous */
14945 max_seg_size, /* max seg size */
14946 0, /* flags */
14947 NULL, /* lock() */
14948 NULL, /* lock() arg */
14949 &fp->tx_mbuf_tag); /* returned dma tag */
14950 if (rc != 0) {
14951 /* XXX unwind and free previous fastpath allocations */
14952 BLOGE(sc, "Failed to create dma tag for "
14953 "'fp %d tx mbufs' (%d)\n", i, rc);
14954 return (1);
14955 }
14956
14957 /* create dma maps for each of the tx mbuf clusters */
14958 for (j = 0; j < TX_BD_TOTAL; j++) {
14959 if (bus_dmamap_create(fp->tx_mbuf_tag,
14960 BUS_DMA_NOWAIT,
14961 &fp->tx_mbuf_chain[j].m_map)) {
14962 /* XXX unwind and free previous fastpath allocations */
14963 BLOGE(sc, "Failed to create dma map for "
14964 "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
14965 return (1);
14966 }
14967 }
14968
14969 /***********************/
14970 /* FP RX MBUF DMA MAPS */
14971 /***********************/
14972
14973 /* create a dma tag for the rx mbufs */
14974 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14975 1, /* alignment */
14976 0, /* boundary limit */
14977 BUS_SPACE_MAXADDR, /* restricted low */
14978 BUS_SPACE_MAXADDR, /* restricted hi */
14979 NULL, /* addr filter() */
14980 NULL, /* addr filter() arg */
14981 MJUM9BYTES, /* max map size */
14982 1, /* num discontinuous */
14983 MJUM9BYTES, /* max seg size */
14984 0, /* flags */
14985 NULL, /* lock() */
14986 NULL, /* lock() arg */
14987 &fp->rx_mbuf_tag); /* returned dma tag */
14988 if (rc != 0) {
14989 /* XXX unwind and free previous fastpath allocations */
14990 BLOGE(sc, "Failed to create dma tag for "
14991 "'fp %d rx mbufs' (%d)\n", i, rc);
14992 return (1);
14993 }
14994
14995 /* create dma maps for each of the rx mbuf clusters */
14996 for (j = 0; j < RX_BD_TOTAL; j++) {
14997 if (bus_dmamap_create(fp->rx_mbuf_tag,
14998 BUS_DMA_NOWAIT,
14999 &fp->rx_mbuf_chain[j].m_map)) {
15000 /* XXX unwind and free previous fastpath allocations */
15001 BLOGE(sc, "Failed to create dma map for "
15002 "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
15003 return (1);
15004 }
15005 }
15006
15007 /* create dma map for the spare rx mbuf cluster */
15008 if (bus_dmamap_create(fp->rx_mbuf_tag,
15009 BUS_DMA_NOWAIT,
15010 &fp->rx_mbuf_spare_map)) {
15011 /* XXX unwind and free previous fastpath allocations */
15012 BLOGE(sc, "Failed to create dma map for "
15013 "'fp %d spare rx mbuf' (%d)\n", i, rc);
15014 return (1);
15015 }
15016
15017 /***************************/
15018 /* FP RX SGE MBUF DMA MAPS */
15019 /***************************/
15020
15021 /* create a dma tag for the rx sge mbufs */
15022 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
15023 1, /* alignment */
15024 0, /* boundary limit */
15025 BUS_SPACE_MAXADDR, /* restricted low */
15026 BUS_SPACE_MAXADDR, /* restricted hi */
15027 NULL, /* addr filter() */
15028 NULL, /* addr filter() arg */
15029 BCM_PAGE_SIZE, /* max map size */
15030 1, /* num discontinuous */
15031 BCM_PAGE_SIZE, /* max seg size */
15032 0, /* flags */
15033 NULL, /* lock() */
15034 NULL, /* lock() arg */
15035 &fp->rx_sge_mbuf_tag); /* returned dma tag */
15036 if (rc != 0) {
15037 /* XXX unwind and free previous fastpath allocations */
15038 BLOGE(sc, "Failed to create dma tag for "
15039 "'fp %d rx sge mbufs' (%d)\n", i, rc);
15040 return (1);
15041 }
15042
15043 /* create dma maps for the rx sge mbuf clusters */
15044 for (j = 0; j < RX_SGE_TOTAL; j++) {
15045 if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15046 BUS_DMA_NOWAIT,
15047 &fp->rx_sge_mbuf_chain[j].m_map)) {
15048 /* XXX unwind and free previous fastpath allocations */
15049 BLOGE(sc, "Failed to create dma map for "
15050 "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
15051 return (1);
15052 }
15053 }
15054
15055 /* create dma map for the spare rx sge mbuf cluster */
15056 if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15057 BUS_DMA_NOWAIT,
15058 &fp->rx_sge_mbuf_spare_map)) {
15059 /* XXX unwind and free previous fastpath allocations */
15060 BLOGE(sc, "Failed to create dma map for "
15061 "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
15062 return (1);
15063 }
15064
15065 /***************************/
15066 /* FP RX TPA MBUF DMA MAPS */
15067 /***************************/
15068
15069 /* create dma maps for the rx tpa mbuf clusters */
15070 max_agg_queues = MAX_AGG_QS(sc);
15071
15072 for (j = 0; j < max_agg_queues; j++) {
15073 if (bus_dmamap_create(fp->rx_mbuf_tag,
15074 BUS_DMA_NOWAIT,
15075 &fp->rx_tpa_info[j].bd.m_map)) {
15076 /* XXX unwind and free previous fastpath allocations */
15077 BLOGE(sc, "Failed to create dma map for "
15078 "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
15079 return (1);
15080 }
15081 }
15082
15083 /* create dma map for the spare rx tpa mbuf cluster */
15084 if (bus_dmamap_create(fp->rx_mbuf_tag,
15085 BUS_DMA_NOWAIT,
15086 &fp->rx_tpa_info_mbuf_spare_map)) {
15087 /* XXX unwind and free previous fastpath allocations */
15088 BLOGE(sc, "Failed to create dma map for "
15089 "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
15090 return (1);
15091 }
15092
15093 bxe_init_sge_ring_bit_mask(fp);
15094 }
15095
15096 return (0);
15097 }
15098
15099 static void
15100 bxe_free_hsi_mem(struct bxe_softc *sc)
15101 {
15102 struct bxe_fastpath *fp;
15103 int max_agg_queues;
15104 int i, j;
15105
15106 if (sc->parent_dma_tag == NULL) {
15107 return; /* assume nothing was allocated */
15108 }
15109
15110 for (i = 0; i < sc->num_queues; i++) {
15111 fp = &sc->fp[i];
15112
15113 /*******************/
15114 /* FP STATUS BLOCK */
15115 /*******************/
15116
15117 bxe_dma_free(sc, &fp->sb_dma);
15118 memset(&fp->status_block, 0, sizeof(fp->status_block));
15119
15120 /******************/
15121 /* FP TX BD CHAIN */
15122 /******************/
15123
15124 bxe_dma_free(sc, &fp->tx_dma);
15125 fp->tx_chain = NULL;
15126
15127 /******************/
15128 /* FP RX BD CHAIN */
15129 /******************/
15130
15131 bxe_dma_free(sc, &fp->rx_dma);
15132 fp->rx_chain = NULL;
15133
15134 /*******************/
15135 /* FP RX RCQ CHAIN */
15136 /*******************/
15137
15138 bxe_dma_free(sc, &fp->rcq_dma);
15139 fp->rcq_chain = NULL;
15140
15141 /*******************/
15142 /* FP RX SGE CHAIN */
15143 /*******************/
15144
15145 bxe_dma_free(sc, &fp->rx_sge_dma);
15146 fp->rx_sge_chain = NULL;
15147
15148 /***********************/
15149 /* FP TX MBUF DMA MAPS */
15150 /***********************/
15151
15152 if (fp->tx_mbuf_tag != NULL) {
15153 for (j = 0; j < TX_BD_TOTAL; j++) {
15154 if (fp->tx_mbuf_chain[j].m_map != NULL) {
15155 bus_dmamap_unload(fp->tx_mbuf_tag,
15156 fp->tx_mbuf_chain[j].m_map);
15157 bus_dmamap_destroy(fp->tx_mbuf_tag,
15158 fp->tx_mbuf_chain[j].m_map);
15159 }
15160 }
15161
15162 bus_dma_tag_destroy(fp->tx_mbuf_tag);
15163 fp->tx_mbuf_tag = NULL;
15164 }
15165
15166 /***********************/
15167 /* FP RX MBUF DMA MAPS */
15168 /***********************/
15169
15170 if (fp->rx_mbuf_tag != NULL) {
15171 for (j = 0; j < RX_BD_TOTAL; j++) {
15172 if (fp->rx_mbuf_chain[j].m_map != NULL) {
15173 bus_dmamap_unload(fp->rx_mbuf_tag,
15174 fp->rx_mbuf_chain[j].m_map);
15175 bus_dmamap_destroy(fp->rx_mbuf_tag,
15176 fp->rx_mbuf_chain[j].m_map);
15177 }
15178 }
15179
15180 if (fp->rx_mbuf_spare_map != NULL) {
15181 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15182 bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15183 }
15184
15185 /***************************/
15186 /* FP RX TPA MBUF DMA MAPS */
15187 /***************************/
15188
15189 max_agg_queues = MAX_AGG_QS(sc);
15190
15191 for (j = 0; j < max_agg_queues; j++) {
15192 if (fp->rx_tpa_info[j].bd.m_map != NULL) {
15193 bus_dmamap_unload(fp->rx_mbuf_tag,
15194 fp->rx_tpa_info[j].bd.m_map);
15195 bus_dmamap_destroy(fp->rx_mbuf_tag,
15196 fp->rx_tpa_info[j].bd.m_map);
15197 }
15198 }
15199
15200 if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
15201 bus_dmamap_unload(fp->rx_mbuf_tag,
15202 fp->rx_tpa_info_mbuf_spare_map);
15203 bus_dmamap_destroy(fp->rx_mbuf_tag,
15204 fp->rx_tpa_info_mbuf_spare_map);
15205 }
15206
15207 bus_dma_tag_destroy(fp->rx_mbuf_tag);
15208 fp->rx_mbuf_tag = NULL;
15209 }
15210
15211 /***************************/
15212 /* FP RX SGE MBUF DMA MAPS */
15213 /***************************/
15214
15215 if (fp->rx_sge_mbuf_tag != NULL) {
15216 for (j = 0; j < RX_SGE_TOTAL; j++) {
15217 if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
15218 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15219 fp->rx_sge_mbuf_chain[j].m_map);
15220 bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15221 fp->rx_sge_mbuf_chain[j].m_map);
15222 }
15223 }
15224
15225 if (fp->rx_sge_mbuf_spare_map != NULL) {
15226 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15227 fp->rx_sge_mbuf_spare_map);
15228 bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15229 fp->rx_sge_mbuf_spare_map);
15230 }
15231
15232 bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
15233 fp->rx_sge_mbuf_tag = NULL;
15234 }
15235 }
15236
15237 /***************************/
15238 /* FW DECOMPRESSION BUFFER */
15239 /***************************/
15240
15241 bxe_dma_free(sc, &sc->gz_buf_dma);
15242 sc->gz_buf = NULL;
15243 free(sc->gz_strm, M_DEVBUF);
15244 sc->gz_strm = NULL;
15245
15246 /*******************/
15247 /* SLOW PATH QUEUE */
15248 /*******************/
15249
15250 bxe_dma_free(sc, &sc->spq_dma);
15251 sc->spq = NULL;
15252
15253 /*************/
15254 /* SLOW PATH */
15255 /*************/
15256
15257 bxe_dma_free(sc, &sc->sp_dma);
15258 sc->sp = NULL;
15259
15260 /***************/
15261 /* EVENT QUEUE */
15262 /***************/
15263
15264 bxe_dma_free(sc, &sc->eq_dma);
15265 sc->eq = NULL;
15266
15267 /************************/
15268 /* DEFAULT STATUS BLOCK */
15269 /************************/
15270
15271 bxe_dma_free(sc, &sc->def_sb_dma);
15272 sc->def_sb = NULL;
15273
15274 bus_dma_tag_destroy(sc->parent_dma_tag);
15275 sc->parent_dma_tag = NULL;
15276 }
15277
15278 /*
15279  * A DMAE transaction from a previous driver may have been in flight when the
15280  * pre-boot stage ended and boot began. That would invalidate the addresses of
15281  * the transaction, setting the was-error bit in the PCI block and causing all
15282  * hw-to-host PCIe transactions to time out. If this happened, clear the
15283  * interrupt that detected it from the pglueb along with the was-done bit.
15284 */
15285 static void
15286 bxe_prev_interrupted_dmae(struct bxe_softc *sc)
15287 {
15288 uint32_t val;
15289
15290 if (!CHIP_IS_E1x(sc)) {
15291 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
15292 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
15293 BLOGD(sc, DBG_LOAD,
15294 "Clearing 'was-error' bit that was set in pglueb");
15295 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
15296 }
15297 }
15298 }
15299
15300 static int
15301 bxe_prev_mcp_done(struct bxe_softc *sc)
15302 {
15303 uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
15304 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
15305 if (!rc) {
15306 BLOGE(sc, "MCP response failure, aborting\n");
15307 return (-1);
15308 }
15309
15310 return (0);
15311 }
15312
15313 static struct bxe_prev_list_node *
15314 bxe_prev_path_get_entry(struct bxe_softc *sc)
15315 {
15316 struct bxe_prev_list_node *tmp;
15317
15318 LIST_FOREACH(tmp, &bxe_prev_list, node) {
15319 if ((sc->pcie_bus == tmp->bus) &&
15320 (sc->pcie_device == tmp->slot) &&
15321 (SC_PATH(sc) == tmp->path)) {
15322 return (tmp);
15323 }
15324 }
15325
15326 return (NULL);
15327 }
15328
15329 static uint8_t
15330 bxe_prev_is_path_marked(struct bxe_softc *sc)
15331 {
15332 struct bxe_prev_list_node *tmp;
15333 int rc = FALSE;
15334
15335 mtx_lock(&bxe_prev_mtx);
15336
15337 tmp = bxe_prev_path_get_entry(sc);
15338 if (tmp) {
15339 if (tmp->aer) {
15340 BLOGD(sc, DBG_LOAD,
15341 "Path %d/%d/%d was marked by AER\n",
15342 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15343 } else {
15344 rc = TRUE;
15345 BLOGD(sc, DBG_LOAD,
15346 "Path %d/%d/%d was already cleaned from previous drivers\n",
15347 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15348 }
15349 }
15350
15351 mtx_unlock(&bxe_prev_mtx);
15352
15353 return (rc);
15354 }
15355
15356 static int
15357 bxe_prev_mark_path(struct bxe_softc *sc,
15358 uint8_t after_undi)
15359 {
15360 struct bxe_prev_list_node *tmp;
15361
15362 mtx_lock(&bxe_prev_mtx);
15363
15364 /* Check whether the entry for this path already exists */
15365 tmp = bxe_prev_path_get_entry(sc);
15366 if (tmp) {
15367 if (!tmp->aer) {
15368 BLOGD(sc, DBG_LOAD,
15369 "Re-marking AER in path %d/%d/%d\n",
15370 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15371 } else {
15372 BLOGD(sc, DBG_LOAD,
15373 "Removing AER indication from path %d/%d/%d\n",
15374 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15375 tmp->aer = 0;
15376 }
15377
15378 mtx_unlock(&bxe_prev_mtx);
15379 return (0);
15380 }
15381
15382 mtx_unlock(&bxe_prev_mtx);
15383
15384 /* Create an entry for this path and add it */
15385 tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15386 (M_NOWAIT | M_ZERO));
15387 if (!tmp) {
15388 BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15389 return (-1);
15390 }
15391
15392 tmp->bus = sc->pcie_bus;
15393 tmp->slot = sc->pcie_device;
15394 tmp->path = SC_PATH(sc);
15395 tmp->aer = 0;
15396 tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15397
15398 mtx_lock(&bxe_prev_mtx);
15399
15400 BLOGD(sc, DBG_LOAD,
15401 "Marked path %d/%d/%d - finished previous unload\n",
15402 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15403 LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15404
15405 mtx_unlock(&bxe_prev_mtx);
15406
15407 return (0);
15408 }
15409
15410 static int
15411 bxe_do_flr(struct bxe_softc *sc)
15412 {
15413 int i;
15414
15415 /* only E2 and onwards support FLR */
15416 if (CHIP_IS_E1x(sc)) {
15417 BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15418 return (-1);
15419 }
15420
15421 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
15422 if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15423 BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15424 sc->devinfo.bc_ver);
15425 return (-1);
15426 }
15427
15428 /* Wait for Transaction Pending bit clean */
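/*
 * Poll up to four times with roughly exponential backoff: no delay on the
 * first check, then 100ms, 200ms, and 400ms (DELAY() takes microseconds).
 */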
15429 for (i = 0; i < 4; i++) {
15430 if (i) {
15431 DELAY(((1 << (i - 1)) * 100) * 1000);
15432 }
15433
15434 if (!bxe_is_pcie_pending(sc)) {
15435 goto clear;
15436 }
15437 }
15438
15439 BLOGE(sc, "PCIE transaction is not cleared, "
15440 "proceeding with reset anyway\n");
15441
15442 clear:
15443
15444 BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15445 bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15446
15447 return (0);
15448 }
15449
15450 struct bxe_mac_vals {
15451 uint32_t xmac_addr;
15452 uint32_t xmac_val;
15453 uint32_t emac_addr;
15454 uint32_t emac_val;
15455 uint32_t umac_addr;
15456 uint32_t umac_val;
15457 uint32_t bmac_addr;
15458 uint32_t bmac_val[2];
15459 };
15460
15461 static void
15462 bxe_prev_unload_close_mac(struct bxe_softc *sc,
15463 struct bxe_mac_vals *vals)
15464 {
15465 uint32_t val, base_addr, offset, mask, reset_reg;
15466 uint8_t mac_stopped = FALSE;
15467 uint8_t port = SC_PORT(sc);
15468 uint32_t wb_data[2];
15469
15470 /* reset addresses as they also mark which values were changed */
15471 vals->bmac_addr = 0;
15472 vals->umac_addr = 0;
15473 vals->xmac_addr = 0;
15474 vals->emac_addr = 0;
15475
15476 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15477
15478 if (!CHIP_IS_E3(sc)) {
15479 val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15480 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15481 if ((mask & reset_reg) && val) {
15482 BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15483 base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15484 : NIG_REG_INGRESS_BMAC0_MEM;
15485 offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15486 : BIGMAC_REGISTER_BMAC_CONTROL;
15487
15488 /*
15489 * use rd/wr since we cannot use dmae. This is safe
15490 * since MCP won't access the bus due to the request
15491 * to unload, and no function on the path can be
15492 * loaded at this time.
15493 */
15494 wb_data[0] = REG_RD(sc, base_addr + offset);
15495 wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15496 vals->bmac_addr = base_addr + offset;
15497 vals->bmac_val[0] = wb_data[0];
15498 vals->bmac_val[1] = wb_data[1];
15499 wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15500 REG_WR(sc, vals->bmac_addr, wb_data[0]);
15501 REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15502 }
15503
15504 BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15505 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15506 vals->emac_val = REG_RD(sc, vals->emac_addr);
15507 REG_WR(sc, vals->emac_addr, 0);
15508 mac_stopped = TRUE;
15509 } else {
15510 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15511 BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15512 base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15513 val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15514 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15515 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15516 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15517 vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15518 REG_WR(sc, vals->xmac_addr, 0);
15519 mac_stopped = TRUE;
15520 }
15521
15522 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15523 if (mask & reset_reg) {
15524 BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15525 base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15526 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15527 vals->umac_val = REG_RD(sc, vals->umac_addr);
15528 REG_WR(sc, vals->umac_addr, 0);
15529 mac_stopped = TRUE;
15530 }
15531 }
15532
15533 if (mac_stopped) {
15534 DELAY(20000);
15535 }
15536 }
15537
15538 #define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15539 #define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff)
15540 #define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
15541 #define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
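/*
 * The UNDI producer register packs both ring producers into one 32-bit
 * word: the RCQ producer in bits [15:0] and the BD producer in bits
 * [31:16]. The macros above extract each half and rebuild the word.
 */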
15542
15543 static void
15544 bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15545 uint8_t port,
15546 uint8_t inc)
15547 {
15548 uint16_t rcq, bd;
15549 uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15550
15551 rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15552 bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15553
15554 tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15555 REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15556
15557 BLOGD(sc, DBG_LOAD,
15558 "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15559 port, bd, rcq);
15560 }
15561
15562 static int
15563 bxe_prev_unload_common(struct bxe_softc *sc)
15564 {
15565 uint32_t reset_reg, tmp_reg = 0, rc;
15566 uint8_t prev_undi = FALSE;
15567 struct bxe_mac_vals mac_vals;
15568 uint32_t timer_count = 1000;
15569 uint32_t prev_brb;
15570
15571 /*
15572 * It is possible a previous function received 'common' answer,
15573 * but hasn't loaded yet, therefore creating a scenario of
15574 * multiple functions receiving 'common' on the same path.
15575 */
15576 BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15577
15578 memset(&mac_vals, 0, sizeof(mac_vals));
15579
15580 if (bxe_prev_is_path_marked(sc)) {
15581 return (bxe_prev_mcp_done(sc));
15582 }
15583
15584 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15585
15586 /* Reset should be performed after BRB is emptied */
15587 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15588 /* Close the MAC Rx to prevent BRB from filling up */
15589 bxe_prev_unload_close_mac(sc, &mac_vals);
15590
15591 /* close LLH filters towards the BRB */
15592 elink_set_rx_filter(&sc->link_params, 0);
15593
15594 /*
15595 * Check if the UNDI driver was previously loaded.
15596 * UNDI driver initializes CID offset for normal bell to 0x7
15597 */
15598 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15599 tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15600 if (tmp_reg == 0x7) {
15601 BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15602 prev_undi = TRUE;
15603 /* clear the UNDI indication */
15604 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15605 /* clear possible idle check errors */
15606 REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15607 }
15608 }
15609
15610 /* wait until BRB is empty */
15611 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15612 while (timer_count) {
15613 prev_brb = tmp_reg;
15614
15615 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15616 if (!tmp_reg) {
15617 break;
15618 }
15619
15620 BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15621
15622 /* reset timer as long as BRB actually gets emptied */
15623 if (prev_brb > tmp_reg) {
15624 timer_count = 1000;
15625 } else {
15626 timer_count--;
15627 }
15628
15629 /* If UNDI resides in memory, manually increment it */
15630 if (prev_undi) {
15631 bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15632 }
15633
15634 DELAY(10);
15635 }
15636
15637 if (!timer_count) {
15638 BLOGE(sc, "Failed to empty BRB\n");
15639 }
15640 }
15641
15642 /* No packets are in the pipeline, path is ready for reset */
15643 bxe_reset_common(sc);
15644
15645 if (mac_vals.xmac_addr) {
15646 REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15647 }
15648 if (mac_vals.umac_addr) {
15649 REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15650 }
15651 if (mac_vals.emac_addr) {
15652 REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15653 }
15654 if (mac_vals.bmac_addr) {
15655 REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15656 REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15657 }
15658
15659 rc = bxe_prev_mark_path(sc, prev_undi);
15660 if (rc) {
15661 bxe_prev_mcp_done(sc);
15662 return (rc);
15663 }
15664
15665 return (bxe_prev_mcp_done(sc));
15666 }
15667
15668 static int
15669 bxe_prev_unload_uncommon(struct bxe_softc *sc)
15670 {
15671 int rc;
15672
15673 BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
15674
15675 /* Test if previous unload process was already finished for this path */
15676 if (bxe_prev_is_path_marked(sc)) {
15677 return (bxe_prev_mcp_done(sc));
15678 }
15679
15680 BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
15681
15682 /*
15683 * If function has FLR capabilities, and existing FW version matches
15684 * the one required, then FLR will be sufficient to clean any residue
15685 * left by previous driver
15686 */
15687 rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
15688 if (!rc) {
15689 /* fw version is good */
15690 BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
15691 rc = bxe_do_flr(sc);
15692 }
15693
15694 if (!rc) {
15695 /* FLR was performed */
15696 BLOGD(sc, DBG_LOAD, "FLR successful\n");
15697 return (0);
15698 }
15699
15700 BLOGD(sc, DBG_LOAD, "Could not FLR\n");
15701
15702 /* Close the MCP request, return failure*/
15703 rc = bxe_prev_mcp_done(sc);
15704 if (!rc) {
15705 rc = BXE_PREV_WAIT_NEEDED;
15706 }
15707
15708 return (rc);
15709 }
15710
15711 static int
15712 bxe_prev_unload(struct bxe_softc *sc)
15713 {
15714 int time_counter = 10;
15715 uint32_t fw, hw_lock_reg, hw_lock_val;
15716 uint32_t rc = 0;
15717
15718 /*
15719 * Clear HW from errors which may have resulted from an interrupted
15720 * DMAE transaction.
15721 */
15722 bxe_prev_interrupted_dmae(sc);
15723
15724 /* Release previously held locks */
15725 hw_lock_reg =
15726 (SC_FUNC(sc) <= 5) ?
15727 (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
15728 (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
15729
15730 hw_lock_val = (REG_RD(sc, hw_lock_reg));
15731 if (hw_lock_val) {
15732 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
15733 BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
15734 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
15735 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
15736 }
15737 BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
15738 REG_WR(sc, hw_lock_reg, 0xffffffff);
15739 } else {
15740 BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
15741 }
15742
15743 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
15744 BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
15745 REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
15746 }
15747
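/*
 * Ask the MCP for an unload. A "common" reply means this function cleans
 * up the entire path; any other reply takes the uncommon path. Retry up
 * to 10 times with a 20ms pause while the uncommon path reports
 * BXE_PREV_WAIT_NEEDED.
 */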
15748 do {
15749 /* Lock MCP using an unload request */
15750 fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
15751 if (!fw) {
15752 BLOGE(sc, "MCP response failure, aborting\n");
15753 rc = -1;
15754 break;
15755 }
15756
15757 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
15758 rc = bxe_prev_unload_common(sc);
15759 break;
15760 }
15761
15762 /* non-common reply from MCP might require looping */
15763 rc = bxe_prev_unload_uncommon(sc);
15764 if (rc != BXE_PREV_WAIT_NEEDED) {
15765 break;
15766 }
15767
15768 DELAY(20000);
15769 } while (--time_counter);
15770
15771 if (!time_counter || rc) {
15772 BLOGE(sc, "Failed to unload previous driver!"
15773 " time_counter %d rc %d\n", time_counter, rc);
15774 rc = -1;
15775 }
15776
15777 return (rc);
15778 }
15779
15780 void
15781 bxe_dcbx_set_state(struct bxe_softc *sc,
15782 uint8_t dcb_on,
15783 uint32_t dcbx_enabled)
15784 {
15785 if (!CHIP_IS_E1x(sc)) {
15786 sc->dcb_state = dcb_on;
15787 sc->dcbx_enabled = dcbx_enabled;
15788 } else {
15789 sc->dcb_state = FALSE;
15790 sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
15791 }
15792 BLOGD(sc, DBG_LOAD,
15793 "DCB state [%s:%s]\n",
15794 dcb_on ? "ON" : "OFF",
15795 (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
15796 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
15797 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
15798 "on-chip with negotiation" : "invalid");
15799 }
15800
15801 /* must be called after sriov-enable */
15802 static int
15803 bxe_set_qm_cid_count(struct bxe_softc *sc)
15804 {
15805 int cid_count = BXE_L2_MAX_CID(sc);
15806
15807 if (IS_SRIOV(sc)) {
15808 cid_count += BXE_VF_CIDS;
15809 }
15810
15811 if (CNIC_SUPPORT(sc)) {
15812 cid_count += CNIC_CID_MAX;
15813 }
15814
15815 return (roundup(cid_count, QM_CID_ROUND));
15816 }
15817
15818 static void
15819 bxe_init_multi_cos(struct bxe_softc *sc)
15820 {
15821 int pri, cos;
15822
15823 uint32_t pri_map = 0; /* XXX change to user config */
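/*
 * pri_map packs one 4-bit CoS value per priority: bits [3:0] hold the CoS
 * for priority 0, bits [7:4] for priority 1, and so on. With the default
 * of 0 every priority maps to CoS 0.
 */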
15824
15825 for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
15826 cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
15827 if (cos < sc->max_cos) {
15828 sc->prio_to_cos[pri] = cos;
15829 } else {
15830 BLOGW(sc, "Invalid COS %d for priority %d "
15831 "(max COS is %d), setting to 0\n",
15832 cos, pri, (sc->max_cos - 1));
15833 sc->prio_to_cos[pri] = 0;
15834 }
15835 }
15836 }
15837
15838 static int
15839 bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
15840 {
15841 struct bxe_softc *sc;
15842 int error, result;
15843
15844 result = 0;
15845 error = sysctl_handle_int(oidp, &result, 0, req);
15846
15847 if (error || !req->newptr) {
15848 return (error);
15849 }
15850
15851 if (result == 1) {
15852 uint32_t temp;
15853 sc = (struct bxe_softc *)arg1;
15854
15855 BLOGI(sc, "... dumping driver state ...\n");
15856 temp = SHMEM2_RD(sc, temperature_in_half_celsius);
15857 BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
15858 }
15859
15860 return (error);
15861 }
15862
15863 static int
15864 bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
15865 {
15866 struct bxe_softc *sc = (struct bxe_softc *)arg1;
15867 uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
15868 uint32_t *offset;
15869 uint64_t value = 0;
15870 int index = (int)arg2;
15871
15872 if (index >= BXE_NUM_ETH_STATS) {
15873 BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
15874 return (-1);
15875 }
15876
15877 offset = (eth_stats + bxe_eth_stats_arr[index].offset);
15878
15879 switch (bxe_eth_stats_arr[index].size) {
15880 case 4:
15881 value = (uint64_t)*offset;
15882 break;
15883 case 8:
15884 value = HILO_U64(*offset, *(offset + 1));
15885 break;
15886 default:
15887 BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
15888 index, bxe_eth_stats_arr[index].size);
15889 return (-1);
15890 }
15891
15892 return (sysctl_handle_64(oidp, &value, 0, req));
15893 }
15894
15895 static int
15896 bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
15897 {
15898 struct bxe_softc *sc = (struct bxe_softc *)arg1;
15899 uint32_t *eth_stats;
15900 uint32_t *offset;
15901 uint64_t value = 0;
15902 uint32_t q_stat = (uint32_t)arg2;
15903 uint32_t fp_index = ((q_stat >> 16) & 0xffff);
15904 uint32_t index = (q_stat & 0xffff);
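/*
 * arg2 packs both coordinates of the requested statistic: the fastpath
 * (queue) index in the upper 16 bits and the index into
 * bxe_eth_q_stats_arr in the lower 16 bits, mirroring the (i << 16) | j
 * encoding used when the sysctl is created in bxe_add_sysctls().
 */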
15905
15906 eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
15907
15908 if (index >= BXE_NUM_ETH_Q_STATS) {
15909 BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
15910 return (-1);
15911 }
15912
15913 offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
15914
15915 switch (bxe_eth_q_stats_arr[index].size) {
15916 case 4:
15917 value = (uint64_t)*offset;
15918 break;
15919 case 8:
15920 value = HILO_U64(*offset, *(offset + 1));
15921 break;
15922 default:
15923 BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
15924 index, bxe_eth_q_stats_arr[index].size);
15925 return (-1);
15926 }
15927
15928 return (sysctl_handle_64(oidp, &value, 0, req));
15929 }
15930
15931 static void bxe_force_link_reset(struct bxe_softc *sc)
15932 {
15933
15934 bxe_acquire_phy_lock(sc);
15935 elink_link_reset(&sc->link_params, &sc->link_vars, 1);
15936 bxe_release_phy_lock(sc);
15937 }
15938
15939 static int
15940 bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS)
15941 {
15942 struct bxe_softc *sc = (struct bxe_softc *)arg1;
15943 uint32_t cfg_idx = bxe_get_link_cfg_idx(sc);
15944 int rc = 0;
15945 int error;
15946 int result;
15947
15948
15949 error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req);
15950
15951 if (error || !req->newptr) {
15952 return (error);
15953 }
15954 if ((sc->bxe_pause_param < 0) || (sc->bxe_pause_param > 8)) {
15955 BLOGW(sc, "invalid pause param (%d) - use integers between 0 and 8\n",sc->bxe_pause_param);
15956 sc->bxe_pause_param = 8;
15957 }
15958
15959 result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT);
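/*
 * The user value (0-8, see the sysctl description string) is shifted into
 * the NVRAM flow-control field. After the shift, the 0x400 bit appears to
 * correspond to the AUTO* settings (user values 4-7); the checks below use
 * it to decide whether pause autonegotiation was requested.
 */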
15960
15961
15962 if((result & 0x400) && !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg)) {
15963 BLOGW(sc, "Does not support Autoneg pause_param %d\n", sc->bxe_pause_param);
15964 return -EINVAL;
15965 }
15966
15967 if(IS_MF(sc))
15968 return 0;
15969 sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO;
15970 if(result & ELINK_FLOW_CTRL_RX)
15971 sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX;
15972
15973 if(result & ELINK_FLOW_CTRL_TX)
15974 sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX;
15975 if(sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO)
15976 sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE;
15977
15978 if(result & 0x400) {
15979 if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) {
15980 sc->link_params.req_flow_ctrl[cfg_idx] =
15981 ELINK_FLOW_CTRL_AUTO;
15982 }
15983 sc->link_params.req_fc_auto_adv = 0;
15984 if (result & ELINK_FLOW_CTRL_RX)
15985 sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX;
15986
15987 if (result & ELINK_FLOW_CTRL_TX)
15988 sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX;
15989 if (!sc->link_params.req_fc_auto_adv)
15990 sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE;
15991 }
15992 if (IS_PF(sc)) {
15993 if (sc->link_vars.link_up) {
15994 bxe_stats_handle(sc, STATS_EVENT_STOP);
15995 }
15996 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
15997 bxe_force_link_reset(sc);
15998 bxe_acquire_phy_lock(sc);
15999
16000 rc = elink_phy_init(&sc->link_params, &sc->link_vars);
16001
16002 bxe_release_phy_lock(sc);
16003
16004 bxe_calc_fc_adv(sc);
16005 }
16006 }
16007 return rc;
16008 }
16009
16010
16011 static void
16012 bxe_add_sysctls(struct bxe_softc *sc)
16013 {
16014 struct sysctl_ctx_list *ctx;
16015 struct sysctl_oid_list *children;
16016 struct sysctl_oid *queue_top, *queue;
16017 struct sysctl_oid_list *queue_top_children, *queue_children;
16018 char queue_num_buf[32];
16019 uint32_t q_stat;
16020 int i, j;
16021
16022 ctx = device_get_sysctl_ctx(sc->dev);
16023 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
16024
16025 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
16026 CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
16027 "version");
16028
16029 snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
16030 BCM_5710_FW_MAJOR_VERSION,
16031 BCM_5710_FW_MINOR_VERSION,
16032 BCM_5710_FW_REVISION_VERSION,
16033 BCM_5710_FW_ENGINEERING_VERSION);
16034
16035 snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
16036 ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" :
16037 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" :
16038 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" :
16039 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
16040 "Unknown"));
16041 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
16042 CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
16043 "multifunction vnics per port");
16044
16045 snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
16046 ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
16047 (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
16048 (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
16049 "???GT/s"),
16050 sc->devinfo.pcie_link_width);
16051
16052 sc->debug = bxe_debug;
16053
16054 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
16055 CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
16056 "bootcode version");
16057 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
16058 CTLFLAG_RD, sc->fw_ver_str, 0,
16059 "firmware version");
16060 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
16061 CTLFLAG_RD, sc->mf_mode_str, 0,
16062 "multifunction mode");
16063 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
16064 CTLFLAG_RD, sc->mac_addr_str, 0,
16065 "mac address");
16066 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
16067 CTLFLAG_RD, sc->pci_link_str, 0,
16068 "pci link status");
16069 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
16070 CTLFLAG_RW, &sc->debug,
16071 "debug logging mode");
16072
16073 sc->trigger_grcdump = 0;
16074 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump",
16075 CTLFLAG_RW, &sc->trigger_grcdump, 0,
16076 "trigger grcdump should be invoked"
16077 " before collecting grcdump");
16078
16079 sc->grcdump_started = 0;
16080 sc->grcdump_done = 0;
16081 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
16082 CTLFLAG_RD, &sc->grcdump_done, 0,
16083 "set by driver when grcdump is done");
16084
16085 sc->rx_budget = bxe_rx_budget;
16086 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
16087 CTLFLAG_RW, &sc->rx_budget, 0,
16088 "rx processing budget");
16089
16090 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_param",
16091 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
16092 bxe_sysctl_pauseparam, "IU",
16093 "need pause frames- DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8");
16094
16095
16096 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
16097 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
16098 bxe_sysctl_state, "IU", "dump driver state");
16099
16100 for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
16101 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
16102 bxe_eth_stats_arr[i].string,
16103 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
16104 bxe_sysctl_eth_stat, "LU", bxe_eth_stats_arr[i].string);
16105 }
16106
16107 /* add a new parent node for all queues "dev.bxe.#.queue" */
16108 queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
16109 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "queue");
16110 queue_top_children = SYSCTL_CHILDREN(queue_top);
16111
16112 for (i = 0; i < sc->num_queues; i++) {
16113 /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
16114 snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
16115 queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
16116 queue_num_buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "single queue");
16117 queue_children = SYSCTL_CHILDREN(queue);
16118
16119 for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
16120 q_stat = ((i << 16) | j);
16121 SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
16122 bxe_eth_q_stats_arr[j].string,
16123 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, q_stat,
16124 bxe_sysctl_eth_q_stat, "LU", bxe_eth_q_stats_arr[j].string);
16125 }
16126 }
16127 }
16128
16129 static int
16130 bxe_alloc_buf_rings(struct bxe_softc *sc)
16131 {
16132 int i;
16133 struct bxe_fastpath *fp;
16134
16135 for (i = 0; i < sc->num_queues; i++) {
16136
16137 fp = &sc->fp[i];
16138
16139 fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
16140 M_NOWAIT, &fp->tx_mtx);
16141 if (fp->tx_br == NULL)
16142 return (-1);
16143 }
16144
16145 return (0);
16146 }
16147
16148 static void
16149 bxe_free_buf_rings(struct bxe_softc *sc)
16150 {
16151 int i;
16152 struct bxe_fastpath *fp;
16153
16154 for (i = 0; i < sc->num_queues; i++) {
16155
16156 fp = &sc->fp[i];
16157
16158 if (fp->tx_br) {
16159 buf_ring_free(fp->tx_br, M_DEVBUF);
16160 fp->tx_br = NULL;
16161 }
16162 }
16163 }
16164
16165 static void
16166 bxe_init_fp_mutexs(struct bxe_softc *sc)
16167 {
16168 int i;
16169 struct bxe_fastpath *fp;
16170
16171 for (i = 0; i < sc->num_queues; i++) {
16172
16173 fp = &sc->fp[i];
16174
16175 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
16176 "bxe%d_fp%d_tx_lock", sc->unit, i);
16177 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
16178
16179 snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
16180 "bxe%d_fp%d_rx_lock", sc->unit, i);
16181 mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
16182 }
16183 }
16184
16185 static void
16186 bxe_destroy_fp_mutexs(struct bxe_softc *sc)
16187 {
16188 int i;
16189 struct bxe_fastpath *fp;
16190
16191 for (i = 0; i < sc->num_queues; i++) {
16192
16193 fp = &sc->fp[i];
16194
16195 if (mtx_initialized(&fp->tx_mtx)) {
16196 mtx_destroy(&fp->tx_mtx);
16197 }
16198
16199 if (mtx_initialized(&fp->rx_mtx)) {
16200 mtx_destroy(&fp->rx_mtx);
16201 }
16202 }
16203 }
16204
16205
16206 /*
16207 * Device attach function.
16208 *
16209 * Allocates device resources, performs secondary chip identification, and
16210 * initializes driver instance variables. This function is called from driver
16211 * load after a successful probe.
16212 *
16213 * Returns:
16214 * 0 = Success, >0 = Failure
16215 */
16216 static int
16217 bxe_attach(device_t dev)
16218 {
16219 struct bxe_softc *sc;
16220
16221 sc = device_get_softc(dev);
16222
16223 BLOGD(sc, DBG_LOAD, "Starting attach...\n");
16224
16225 sc->state = BXE_STATE_CLOSED;
16226
16227 sc->dev = dev;
16228 sc->unit = device_get_unit(dev);
16229
16230 BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
16231
16232 sc->pcie_bus = pci_get_bus(dev);
16233 sc->pcie_device = pci_get_slot(dev);
16234 sc->pcie_func = pci_get_function(dev);
16235
16236 /* enable bus master capability */
16237 pci_enable_busmaster(dev);
16238
16239 /* get the BARs */
16240 if (bxe_allocate_bars(sc) != 0) {
16241 return (ENXIO);
16242 }
16243
16244 /* initialize the mutexes */
16245 bxe_init_mutexes(sc);
16246
16247 /* prepare the periodic callout */
16248 callout_init(&sc->periodic_callout, 1);
16249
16250 /* prepare the chip taskqueue */
16251 sc->chip_tq_flags = CHIP_TQ_NONE;
16252 snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
16253 "bxe%d_chip_tq", sc->unit);
16254 TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
16255 sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
16256 taskqueue_thread_enqueue,
16257 &sc->chip_tq);
16258 taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
16259 "%s", sc->chip_tq_name);
16260
16261 TIMEOUT_TASK_INIT(taskqueue_thread,
16262 &sc->sp_err_timeout_task, 0, bxe_sp_err_timeout_task, sc);
16263
16264
16265 /* get device info and set params */
16266 if (bxe_get_device_info(sc) != 0) {
16267 BLOGE(sc, "getting device info\n");
16268 bxe_deallocate_bars(sc);
16269 pci_disable_busmaster(dev);
16270 return (ENXIO);
16271 }
16272
16273 /* get final misc params */
16274 bxe_get_params(sc);
16275
16276 /* set the default MTU (changed via ifconfig) */
16277 sc->mtu = ETHERMTU;
16278
16279 bxe_set_modes_bitmap(sc);
16280
16281 /* XXX
16282 * If in AFEX mode and the function is configured for FCoE
16283 * then bail... no L2 allowed.
16284 */
16285
16286 /* get phy settings from shmem and 'and' against admin settings */
16287 bxe_get_phy_info(sc);
16288
16289 /* initialize the FreeBSD ifnet interface */
16290 if (bxe_init_ifnet(sc) != 0) {
16291 bxe_release_mutexes(sc);
16292 bxe_deallocate_bars(sc);
16293 pci_disable_busmaster(dev);
16294 return (ENXIO);
16295 }
16296
16297 if (bxe_add_cdev(sc) != 0) {
16298 if (sc->ifp != NULL) {
16299 ether_ifdetach(sc->ifp);
16300 }
16301 ifmedia_removeall(&sc->ifmedia);
16302 bxe_release_mutexes(sc);
16303 bxe_deallocate_bars(sc);
16304 pci_disable_busmaster(dev);
16305 return (ENXIO);
16306 }
16307
16308 /* allocate device interrupts */
16309 if (bxe_interrupt_alloc(sc) != 0) {
16310 bxe_del_cdev(sc);
16311 if (sc->ifp != NULL) {
16312 ether_ifdetach(sc->ifp);
16313 }
16314 ifmedia_removeall(&sc->ifmedia);
16315 bxe_release_mutexes(sc);
16316 bxe_deallocate_bars(sc);
16317 pci_disable_busmaster(dev);
16318 return (ENXIO);
16319 }
16320
16321 bxe_init_fp_mutexs(sc);
16322
16323 if (bxe_alloc_buf_rings(sc) != 0) {
16324 bxe_free_buf_rings(sc);
16325 bxe_interrupt_free(sc);
16326 bxe_del_cdev(sc);
16327 if (sc->ifp != NULL) {
16328 ether_ifdetach(sc->ifp);
16329 }
16330 ifmedia_removeall(&sc->ifmedia);
16331 bxe_release_mutexes(sc);
16332 bxe_deallocate_bars(sc);
16333 pci_disable_busmaster(dev);
16334 return (ENXIO);
16335 }
16336
16337 /* allocate ilt */
16338 if (bxe_alloc_ilt_mem(sc) != 0) {
16339 bxe_free_buf_rings(sc);
16340 bxe_interrupt_free(sc);
16341 bxe_del_cdev(sc);
16342 if (sc->ifp != NULL) {
16343 ether_ifdetach(sc->ifp);
16344 }
16345 ifmedia_removeall(&sc->ifmedia);
16346 bxe_release_mutexes(sc);
16347 bxe_deallocate_bars(sc);
16348 pci_disable_busmaster(dev);
16349 return (ENXIO);
16350 }
16351
16352 /* allocate the host hardware/software hsi structures */
16353 if (bxe_alloc_hsi_mem(sc) != 0) {
16354 bxe_free_ilt_mem(sc);
16355 bxe_free_buf_rings(sc);
16356 bxe_interrupt_free(sc);
16357 bxe_del_cdev(sc);
16358 if (sc->ifp != NULL) {
16359 ether_ifdetach(sc->ifp);
16360 }
16361 ifmedia_removeall(&sc->ifmedia);
16362 bxe_release_mutexes(sc);
16363 bxe_deallocate_bars(sc);
16364 pci_disable_busmaster(dev);
16365 return (ENXIO);
16366 }
16367
16368 /* need to reset chip if UNDI was active */
16369 if (IS_PF(sc) && !BXE_NOMCP(sc)) {
16370 /* init fw_seq */
16371 sc->fw_seq =
16372 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
16373 DRV_MSG_SEQ_NUMBER_MASK);
16374 BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
16375 bxe_prev_unload(sc);
16376 }
16377
16378 #if 1
16379 /* XXX */
16380 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16381 #else
16382 if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
16383 SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
16384 SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
16385 SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
16386 bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
16387 bxe_dcbx_init_params(sc);
16388 } else {
16389 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16390 }
16391 #endif
16392
16393 /* calculate qm_cid_count */
16394 sc->qm_cid_count = bxe_set_qm_cid_count(sc);
16395 BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
16396
16397 sc->max_cos = 1;
16398 bxe_init_multi_cos(sc);
16399
16400 bxe_add_sysctls(sc);
16401
16402 return (0);
16403 }
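/*
 * Note on the attach flow above: every failure path unwinds only what has
 * already been set up, in reverse order of acquisition (cdev, ifnet,
 * interrupts, buf rings, ILT, HSI memory, mutexes, BARs, bus mastering).
 * bxe_detach() below performs the same teardown for a fully attached device.
 */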
16404
16405 /*
16406 * Device detach function.
16407 *
16408 * Stops the controller, resets the controller, and releases resources.
16409 *
16410 * Returns:
16411 * 0 = Success, >0 = Failure
16412 */
16413 static int
16414 bxe_detach(device_t dev)
16415 {
16416 struct bxe_softc *sc;
16417 if_t ifp;
16418
16419 sc = device_get_softc(dev);
16420
16421 BLOGD(sc, DBG_LOAD, "Starting detach...\n");
16422
16423 ifp = sc->ifp;
16424 if (ifp != NULL && if_vlantrunkinuse(ifp)) {
16425 BLOGE(sc, "Cannot detach while VLANs are in use.\n");
16426 return (EBUSY);
16427 }
16428
16429 bxe_del_cdev(sc);
16430
16431 /* stop the periodic callout */
16432 bxe_periodic_stop(sc);
16433
16434 /* stop the chip taskqueue */
16435 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16436 if (sc->chip_tq) {
16437 taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16438 taskqueue_free(sc->chip_tq);
16439 sc->chip_tq = NULL;
16440 taskqueue_drain_timeout(taskqueue_thread,
16441 &sc->sp_err_timeout_task);
16442 }
16443
16444 /* stop and reset the controller if it was open */
16445 if (sc->state != BXE_STATE_CLOSED) {
16446 BXE_CORE_LOCK(sc);
16447 bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16448 sc->state = BXE_STATE_DISABLED;
16449 BXE_CORE_UNLOCK(sc);
16450 }
16451
16452 /* release the network interface */
16453 if (ifp != NULL) {
16454 ether_ifdetach(ifp);
16455 }
16456 ifmedia_removeall(&sc->ifmedia);
16457
16458 /* XXX do the following based on driver state... */
16459
16460 /* free the host hardware/software hsi structures */
16461 bxe_free_hsi_mem(sc);
16462
16463 /* free ilt */
16464 bxe_free_ilt_mem(sc);
16465
16466 bxe_free_buf_rings(sc);
16467
16468 /* release the interrupts */
16469 bxe_interrupt_free(sc);
16470
16471 /* Release the mutexes */
16472 bxe_destroy_fp_mutexs(sc);
16473 bxe_release_mutexes(sc);
16474
16475
16476 /* Release the PCIe BAR mapped memory */
16477 bxe_deallocate_bars(sc);
16478
16479 /* Release the FreeBSD interface. */
16480 if (sc->ifp != NULL) {
16481 if_free(sc->ifp);
16482 }
16483
16484 pci_disable_busmaster(dev);
16485
16486 return (0);
16487 }
16488
16489 /*
16490 * Device shutdown function.
16491 *
16492 * Stops and resets the controller.
16493 *
16494 * Returns:
16495 * 0 = Success
16496 */
16497 static int
16498 bxe_shutdown(device_t dev)
16499 {
16500 struct bxe_softc *sc;
16501
16502 sc = device_get_softc(dev);
16503
16504 BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16505
16506 /* stop the periodic callout */
16507 bxe_periodic_stop(sc);
16508
16509 if (sc->state != BXE_STATE_CLOSED) {
16510 BXE_CORE_LOCK(sc);
16511 bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
16512 BXE_CORE_UNLOCK(sc);
16513 }
16514
16515 return (0);
16516 }
16517
16518 void
16519 bxe_igu_ack_sb(struct bxe_softc *sc,
16520 uint8_t igu_sb_id,
16521 uint8_t segment,
16522 uint16_t index,
16523 uint8_t op,
16524 uint8_t update)
16525 {
16526 uint32_t igu_addr = sc->igu_base_addr;
16527 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
16528 bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16529 }
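/*
 * Address arithmetic sketch (derived from the computation above): each IGU
 * interrupt-ack command slot is 8 bytes wide, so for an assumed igu_sb_id
 * of 5 the ack is written at igu_base_addr + (IGU_CMD_INT_ACK_BASE + 5) * 8.
 */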
16530
16531 static void
16532 bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16533 uint8_t func,
16534 uint8_t idu_sb_id,
16535 uint8_t is_pf)
16536 {
16537 uint32_t data, ctl, cnt = 100;
16538 uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16539 uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
16540 uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16541 uint32_t sb_bit = 1 << (idu_sb_id%32);
16542 uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16543 uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
16544
16545 /* Not supported in BC mode */
16546 if (CHIP_INT_MODE_IS_BC(sc)) {
16547 return;
16548 }
16549
16550 data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
16551 IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
16552 IGU_REGULAR_CLEANUP_SET |
16553 IGU_REGULAR_BCLEANUP);
16554
16555 ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
16556 (func_encode << IGU_CTRL_REG_FID_SHIFT) |
16557 (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
16558
16559 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16560 data, igu_addr_data);
16561 REG_WR(sc, igu_addr_data, data);
16562
16563 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16564 BUS_SPACE_BARRIER_WRITE);
16565 mb();
16566
16567 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16568 ctl, igu_addr_ctl);
16569 REG_WR(sc, igu_addr_ctl, ctl);
16570
16571 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16572 BUS_SPACE_BARRIER_WRITE);
16573 mb();
16574
16575 /* wait for clean up to finish */
16576 while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16577 DELAY(20000);
16578 }
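/*
 * Rough timing bound for the poll above, assuming DELAY() takes
 * microseconds: up to 100 iterations * 20000 us = ~2 seconds before the
 * cleanup is reported as failed below.
 */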
16579
16580 if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
16581 BLOGD(sc, DBG_LOAD,
16582 "Unable to finish IGU cleanup: "
16583 "idu_sb_id %d offset %d bit %d (cnt %d)\n",
16584 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
16585 }
16586 }
16587
16588 static void
16589 bxe_igu_clear_sb(struct bxe_softc *sc,
16590 uint8_t idu_sb_id)
16591 {
16592 bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
16593 }
16594
16595
16596
16597
16598
16599
16600
16601 /*******************/
16602 /* ECORE CALLBACKS */
16603 /*******************/
16604
16605 static void
16606 bxe_reset_common(struct bxe_softc *sc)
16607 {
16608 uint32_t val = 0x1400;
16609
16610 /* reset_common */
16611 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
16612
16613 if (CHIP_IS_E3(sc)) {
16614 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16615 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16616 }
16617
16618 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
16619 }
16620
16621 static void
16622 bxe_common_init_phy(struct bxe_softc *sc)
16623 {
16624 uint32_t shmem_base[2];
16625 uint32_t shmem2_base[2];
16626
16627 /* Avoid common init in case MFW supports LFA */
16628 if (SHMEM2_RD(sc, size) >
16629 (uint32_t)offsetof(struct shmem2_region,
16630 lfa_host_addr[SC_PORT(sc)])) {
16631 return;
16632 }
16633
16634 shmem_base[0] = sc->devinfo.shmem_base;
16635 shmem2_base[0] = sc->devinfo.shmem2_base;
16636
16637 if (!CHIP_IS_E1x(sc)) {
16638 shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr);
16639 shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
16640 }
16641
16642 bxe_acquire_phy_lock(sc);
16643 elink_common_init_phy(sc, shmem_base, shmem2_base,
16644 sc->devinfo.chip_id, 0);
16645 bxe_release_phy_lock(sc);
16646 }
16647
16648 static void
16649 bxe_pf_disable(struct bxe_softc *sc)
16650 {
16651 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
16652
16653 val &= ~IGU_PF_CONF_FUNC_EN;
16654
16655 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
16656 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16657 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
16658 }
16659
16660 static void
16661 bxe_init_pxp(struct bxe_softc *sc)
16662 {
16663 uint16_t devctl;
16664 int r_order, w_order;
16665
16666 devctl = bxe_pcie_capability_read(sc, PCIER_DEVICE_CTL, 2);
16667
16668 BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
16669
16670 w_order = ((devctl & PCIEM_CTL_MAX_PAYLOAD) >> 5);
16671
16672 if (sc->mrrs == -1) {
16673 r_order = ((devctl & PCIEM_CTL_MAX_READ_REQUEST) >> 12);
16674 } else {
16675 BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16676 r_order = sc->mrrs;
16677 }
16678
16679 ecore_init_pxp_arb(sc, r_order, w_order);
16680 }
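/*
 * Field decode sketch (standard PCIe encoding, not driver-specific): the
 * Device Control register keeps Max_Payload_Size in bits 7:5 and
 * Max_Read_Request_Size in bits 14:12, where an encoded order n means
 * (128 << n) bytes. For example, a w_order of 2 corresponds to a 512-byte
 * maximum payload.
 */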
16681
16682 static uint32_t
16683 bxe_get_pretend_reg(struct bxe_softc *sc)
16684 {
16685 uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
16686 uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
16687 return (base + (SC_ABS_FUNC(sc)) * stride);
16688 }
16689
16690 /*
16691 * Called only on E1H or E2.
16692 * When pretending to be a PF, the pretend value is the function number 0..7.
16693 * When pretending to be a VF, the pretend value is the PF-num:VF-valid:ABS-VFID
16694 * combination.
16695 */
16696 static int
16697 bxe_pretend_func(struct bxe_softc *sc,
16698 uint16_t pretend_func_val)
16699 {
16700 uint32_t pretend_reg;
16701
16702 if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
16703 return (-1);
16704 }
16705
16706 /* get my own pretend register */
16707 pretend_reg = bxe_get_pretend_reg(sc);
16708 REG_WR(sc, pretend_reg, pretend_func_val);
16709 REG_RD(sc, pretend_reg);
16710 return (0);
16711 }
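/*
 * Usage sketch (mirrors how the HW-init code later in this file uses the
 * helper; SOME_PER_FUNC_REG is a placeholder, not a real register):
 *
 *     bxe_pretend_func(sc, other_abs_func);     // impersonate another func
 *     REG_WR(sc, SOME_PER_FUNC_REG, 0);         // program its view of the HW
 *     bxe_pretend_func(sc, SC_ABS_FUNC(sc));    // always restore our own id
 */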
16712
16713 static void
16714 bxe_iov_init_dmae(struct bxe_softc *sc)
16715 {
16716 return;
16717 }
16718
16719 static void
16720 bxe_iov_init_dq(struct bxe_softc *sc)
16721 {
16722 return;
16723 }
16724
16725 /* send a NIG loopback debug packet */
16726 static void
16727 bxe_lb_pckt(struct bxe_softc *sc)
16728 {
16729 uint32_t wb_write[3];
16730
16731 /* Ethernet source and destination addresses */
16732 wb_write[0] = 0x55555555;
16733 wb_write[1] = 0x55555555;
16734 wb_write[2] = 0x20; /* SOP */
16735 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16736
16737 /* NON-IP protocol */
16738 wb_write[0] = 0x09000000;
16739 wb_write[1] = 0x55555555;
16740 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
16741 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16742 }
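/*
 * Frame format note (derived from the writes above): each wide-bus write
 * carries 8 bytes of data plus a control word, so the two writes emit
 * exactly one 16-byte loopback frame, the first fragment flagged SOP (0x20)
 * and the second flagged EOP (0x10). This is why the NIG statistics checks
 * below expect packets of size 0x10.
 */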
16743
16744 /*
16745 * Some of the internal memories are not directly readable from the driver.
16746 * To test them we send debug packets.
16747 */
16748 static int
16749 bxe_int_mem_test(struct bxe_softc *sc)
16750 {
16751 int factor;
16752 int count, i;
16753 uint32_t val = 0;
16754
16755 if (CHIP_REV_IS_FPGA(sc)) {
16756 factor = 120;
16757 } else if (CHIP_REV_IS_EMUL(sc)) {
16758 factor = 200;
16759 } else {
16760 factor = 1;
16761 }
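/*
 * Timing sketch: each wait loop in this test delays 10000 us (10 ms) per
 * iteration for up to 1000 * factor iterations, so a single wait is bounded
 * by roughly 10 seconds on real silicon and proportionally longer on
 * FPGA/emulation.
 */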
16762
16763 /* disable inputs of parser neighbor blocks */
16764 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16765 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16766 REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16767 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16768
16769 /* write 0 to parser credits for CFC search request */
16770 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16771
16772 /* send Ethernet packet */
16773 bxe_lb_pckt(sc);
16774
16775 /* TODO: do I reset the NIG statistics? */
16776 /* Wait until NIG register shows 1 packet of size 0x10 */
16777 count = 1000 * factor;
16778 while (count) {
16779 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16780 val = *BXE_SP(sc, wb_data[0]);
16781 if (val == 0x10) {
16782 break;
16783 }
16784
16785 DELAY(10000);
16786 count--;
16787 }
16788
16789 if (val != 0x10) {
16790 BLOGE(sc, "NIG timeout val=0x%x\n", val);
16791 return (-1);
16792 }
16793
16794 /* wait until PRS register shows 1 packet */
16795 count = (1000 * factor);
16796 while (count) {
16797 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16798 if (val == 1) {
16799 break;
16800 }
16801
16802 DELAY(10000);
16803 count--;
16804 }
16805
16806 if (val != 0x1) {
16807 BLOGE(sc, "PRS timeout val=0x%x\n", val);
16808 return (-2);
16809 }
16810
16811 /* Reset and init BRB, PRS */
16812 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16813 DELAY(50000);
16814 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16815 DELAY(50000);
16816 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16817 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16818
16819 /* Disable inputs of parser neighbor blocks */
16820 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16821 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16822 REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16823 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16824
16825 /* Write 0 to parser credits for CFC search request */
16826 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16827
16828 /* send 10 Ethernet packets */
16829 for (i = 0; i < 10; i++) {
16830 bxe_lb_pckt(sc);
16831 }
16832
16833 /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
16834 count = (1000 * factor);
16835 while (count) {
16836 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16837 val = *BXE_SP(sc, wb_data[0]);
16838 if (val == 0xb0) {
16839 break;
16840 }
16841
16842 DELAY(10000);
16843 count--;
16844 }
16845
16846 if (val != 0xb0) {
16847 BLOGE(sc, "NIG timeout val=0x%x\n", val);
16848 return (-3);
16849 }
16850
16851 /* Wait until PRS register shows 2 packets */
16852 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16853 if (val != 2) {
16854 BLOGE(sc, "PRS timeout val=0x%x\n", val);
16855 }
16856
16857 /* Write 1 to parser credits for CFC search request */
16858 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
16859
16860 /* Wait until PRS register shows 3 packets */
16861 DELAY(10000 * factor);
16862
16863 /* check that the PRS register now shows 3 packets */
16864 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16865 if (val != 3) {
16866 BLOGE(sc, "PRS timeout val=0x%x\n", val);
16867 }
16868
16869 /* clear NIG EOP FIFO */
16870 for (i = 0; i < 11; i++) {
16871 REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
16872 }
16873
16874 val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
16875 if (val != 1) {
16876 BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
16877 return (-4);
16878 }
16879
16880 /* Reset and init BRB, PRS, NIG */
16881 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16882 DELAY(50000);
16883 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16884 DELAY(50000);
16885 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16886 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16887 if (!CNIC_SUPPORT(sc)) {
16888 /* set NIC mode */
16889 REG_WR(sc, PRS_REG_NIC_MODE, 1);
16890 }
16891
16892 /* Enable inputs of parser neighbor blocks */
16893 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
16894 REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
16895 REG_WR(sc, CFC_REG_DEBUG0, 0x0);
16896 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
16897
16898 return (0);
16899 }
16900
16901 static void
16902 bxe_setup_fan_failure_detection(struct bxe_softc *sc)
16903 {
16904 int is_required;
16905 uint32_t val;
16906 int port;
16907
16908 is_required = 0;
16909 val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
16910 SHARED_HW_CFG_FAN_FAILURE_MASK);
16911
16912 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
16913 is_required = 1;
16914 }
16915 /*
16916 * The fan failure mechanism is usually related to the PHY type since
16917 * the power consumption of the board is affected by the PHY. Currently,
16918 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
16919 */
16920 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
16921 for (port = PORT_0; port < PORT_MAX; port++) {
16922 is_required |= elink_fan_failure_det_req(sc,
16923 sc->devinfo.shmem_base,
16924 sc->devinfo.shmem2_base,
16925 port);
16926 }
16927 }
16928
16929 BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
16930
16931 if (is_required == 0) {
16932 return;
16933 }
16934
16935 /* Fan failure is indicated by SPIO 5 */
16936 bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
16937
16938 /* set to active low mode */
16939 val = REG_RD(sc, MISC_REG_SPIO_INT);
16940 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
16941 REG_WR(sc, MISC_REG_SPIO_INT, val);
16942
16943 /* enable interrupt to signal the IGU */
16944 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
16945 val |= MISC_SPIO_SPIO5;
16946 REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
16947 }
16948
16949 static void
16950 bxe_enable_blocks_attention(struct bxe_softc *sc)
16951 {
16952 uint32_t val;
16953
16954 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16955 if (!CHIP_IS_E1x(sc)) {
16956 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
16957 } else {
16958 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
16959 }
16960 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16961 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
16962 /*
16963 * mask read length error interrupts in brb for parser
16964 * (parsing unit and 'checksum and crc' unit)
16965 * these errors are legal (PU reads fixed length and CAC can cause
16966 * read length error on truncated packets)
16967 */
16968 REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
16969 REG_WR(sc, QM_REG_QM_INT_MASK, 0);
16970 REG_WR(sc, TM_REG_TM_INT_MASK, 0);
16971 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
16972 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
16973 REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
16974 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
16975 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
16976 REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
16977 REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
16978 REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
16979 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
16980 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
16981 REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
16982 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
16983 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
16984 REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
16985 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
16986 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
16987
16988 val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
16989 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
16990 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
16991 if (!CHIP_IS_E1x(sc)) {
16992 val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
16993 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
16994 }
16995 REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
16996
16997 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
16998 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
16999 REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
17000 /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
17001
17002 if (!CHIP_IS_E1x(sc)) {
17003 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
17004 REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
17005 }
17006
17007 REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
17008 REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
17009 /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
17010 REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
17011 }
17012
17013 /**
17014 * bxe_init_hw_common - initialize the HW at the COMMON phase.
17015 *
17016 * @sc: driver handle
17017 */
17018 static int
17019 bxe_init_hw_common(struct bxe_softc *sc)
17020 {
17021 uint8_t abs_func_id;
17022 uint32_t val;
17023
17024 BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
17025 SC_ABS_FUNC(sc));
17026
17027 /*
17028 * take the RESET lock to protect undi_unload flow from accessing
17029 * registers while we are resetting the chip
17030 */
17031 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
17032
17033 bxe_reset_common(sc);
17034
17035 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
17036
17037 val = 0xfffc;
17038 if (CHIP_IS_E3(sc)) {
17039 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
17040 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
17041 }
17042
17043 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
17044
17045 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
17046
17047 ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
17048 BLOGD(sc, DBG_LOAD, "after misc block init\n");
17049
17050 if (!CHIP_IS_E1x(sc)) {
17051 /*
17052 * In 4-port or 2-port mode we need to turn off master-enable for
17053 * everyone. After that we turn it back on for self. So, we disregard
17054 * multi-function, and always disable all functions on the given path,
17055 * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
17056 */
17057 for (abs_func_id = SC_PATH(sc);
17058 abs_func_id < (E2_FUNC_MAX * 2);
17059 abs_func_id += 2) {
17060 if (abs_func_id == SC_ABS_FUNC(sc)) {
17061 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17062 continue;
17063 }
17064
17065 bxe_pretend_func(sc, abs_func_id);
17066
17067 /* clear pf enable */
17068 bxe_pf_disable(sc);
17069
17070 bxe_pretend_func(sc, SC_ABS_FUNC(sc));
17071 }
17072 }
17073
17074 BLOGD(sc, DBG_LOAD, "after pf disable\n");
17075
17076 ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
17077
17078 if (CHIP_IS_E1(sc)) {
17079 /*
17080 * enable HW interrupt from PXP on USDM overflow
17081 * bit 16 on INT_MASK_0
17082 */
17083 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
17084 }
17085
17086 ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
17087 bxe_init_pxp(sc);
17088
17089 #ifdef __BIG_ENDIAN
17090 REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
17091 REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
17092 REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
17093 REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
17094 REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
17095 /* make sure this value is 0 */
17096 REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
17097
17098 //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
17099 REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
17100 REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
17101 REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
17102 REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
17103 #endif
17104
17105 ecore_ilt_init_page_size(sc, INITOP_SET);
17106
17107 if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
17108 REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
17109 }
17110
17111 /* let the HW do its magic... */
17112 DELAY(100000);
17113
17114 /* finish PXP init */
17115 val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
17116 if (val != 1) {
17117 BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
17118 val);
17119 return (-1);
17120 }
17121 val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
17122 if (val != 1) {
17123 BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
17124 return (-1);
17125 }
17126
17127 BLOGD(sc, DBG_LOAD, "after pxp init\n");
17128
17129 /*
17130 * Timer bug workaround for E2 only. We need to set the entire ILT to have
17131 * entries with value "0" and valid bit on. This needs to be done by the
17132 * first PF that is loaded in a path (i.e. common phase)
17133 */
17134 if (!CHIP_IS_E1x(sc)) {
17135 /*
17136 * In E2 there is a bug in the timers block that can cause function 6 / 7
17137 * (i.e. vnic3) to start even if it is marked as "scan-off".
17138 * This occurs when a different function (func2,3) is being marked
17139 * as "scan-off". Real-life scenario for example: if a driver is being
17140 * load-unloaded while func6,7 are down. This will cause the timer to access
17141 * the ilt, translate to a logical address and send a request to read/write.
17142 * Since the ilt for the function that is down is not valid, this will cause
17143 * a translation error which is unrecoverable.
17144 * The Workaround is intended to make sure that when this happens nothing
17145 * fatal will occur. The workaround:
17146 * 1. First PF driver which loads on a path will:
17147 * a. After taking the chip out of reset, by using pretend,
17148 * it will write "0" to the following registers of
17149 * the other vnics.
17150 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
17151 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
17152 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
17153 * And for itself it will write '1' to
17154 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
17155 * dmae-operations (writing to pram for example.)
17156 * note: can be done for only function 6,7 but cleaner this
17157 * way.
17158 * b. Write zero+valid to the entire ILT.
17159 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
17160 * VNIC3 (of that port). The range allocated will be the
17161 * entire ILT. This is needed to prevent ILT range error.
17162 * 2. Any PF driver load flow:
17163 * a. ILT update with the physical addresses of the allocated
17164 * logical pages.
17165 * b. Wait 20msec. - note that this timeout is needed to make
17166 * sure there are no requests in one of the PXP internal
17167 * queues with "old" ILT addresses.
17168 * c. PF enable in the PGLC.
17169 * d. Clear the was_error of the PF in the PGLC. (could have
17170 * occurred while driver was down)
17171 * e. PF enable in the CFC (WEAK + STRONG)
17172 * f. Timers scan enable
17173 * 3. PF driver unload flow:
17174 * a. Clear the Timers scan_en.
17175 * b. Polling for scan_on=0 for that PF.
17176 * c. Clear the PF enable bit in the PXP.
17177 * d. Clear the PF enable in the CFC (WEAK + STRONG)
17178 * e. Write zero+valid to all ILT entries (The valid bit must
17179 * stay set)
17180 * f. If this is VNIC 3 of a port then also init
17181 * first_timers_ilt_entry to zero and last_timers_ilt_entry
17182 * to the last entry in the ILT.
17183 *
17184 * Notes:
17185 * Currently the PF error in the PGLC is non recoverable.
17186 * In the future there will be a recovery routine for this error.
17187 * Currently attention is masked.
17188 * Having an MCP lock on the load/unload process does not guarantee that
17189 * there is no Timer disable during Func6/7 enable. This is because the
17190 * Timers scan is currently being cleared by the MCP on FLR.
17191 * Step 2.d can be done only for PF6/7 and the driver can also check if
17192 * there is error before clearing it. But the flow above is simpler and
17193 * more general.
17194 * All ILT entries are written by zero+valid and not just PF6/7
17195 * ILT entries since in the future the ILT entries allocation for
17196 * PF-s might be dynamic.
17197 */
17198 struct ilt_client_info ilt_cli;
17199 struct ecore_ilt ilt;
17200
17201 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
17202 memset(&ilt, 0, sizeof(struct ecore_ilt));
17203
17204 /* initialize dummy TM client */
17205 ilt_cli.start = 0;
17206 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
17207 ilt_cli.client_num = ILT_CLIENT_TM;
17208
17209 /*
17210 * Step 1: set zeroes to all ilt page entries with valid bit on
17211 * Step 2: set the timers first/last ilt entry to point
17212 * to the entire range to prevent ILT range error for 3rd/4th
17213 * vnic (this code assumes existence of the vnic)
17214 *
17215 * both steps performed by call to ecore_ilt_client_init_op()
17216 * with dummy TM client
17217 *
17218 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
17219 * and its counterpart are split registers
17220 */
17221
17222 bxe_pretend_func(sc, (SC_PATH(sc) + 6));
17223 ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
17224 bxe_pretend_func(sc, SC_ABS_FUNC(sc));
17225
17226 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
17227 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
17228 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
17229 }
17230
17231 REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
17232 REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
17233
17234 if (!CHIP_IS_E1x(sc)) {
17235 int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
17236 (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
17237
17238 ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
17239 ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
17240
17241 /* let the HW do its magic... */
17242 do {
17243 DELAY(200000);
17244 val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
17245 } while (factor-- && (val != 1));
17246
17247 if (val != 1) {
17248 BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val);
17249 return (-1);
17250 }
17251 }
17252
17253 BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
17254
17255 ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
17256
17257 bxe_iov_init_dmae(sc);
17258
17259 /* clean the DMAE memory */
17260 sc->dmae_ready = 1;
17261 ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
17262
17263 ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
17264
17265 ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
17266
17267 ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
17268
17269 ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
17270
17271 bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
17272 bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
17273 bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
17274 bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
17275
17276 ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
17277
17278 /* QM queues pointers table */
17279 ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
17280
17281 /* soft reset pulse */
17282 REG_WR(sc, QM_REG_SOFT_RESET, 1);
17283 REG_WR(sc, QM_REG_SOFT_RESET, 0);
17284
17285 if (CNIC_SUPPORT(sc))
17286 ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
17287
17288 ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
17289 REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
17290 if (!CHIP_REV_IS_SLOW(sc)) {
17291 /* enable hw interrupt from doorbell Q */
17292 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
17293 }
17294
17295 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
17296
17297 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
17298 REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
17299
17300 if (!CHIP_IS_E1(sc)) {
17301 REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
17302 }
17303
17304 if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
17305 if (IS_MF_AFEX(sc)) {
17306 /*
17307 * configure that AFEX and VLAN headers must be
17308 * received in AFEX mode
17309 */
17310 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
17311 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
17312 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
17313 REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
17314 REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
17315 } else {
17316 /*
17317 * Bit-map indicating which L2 hdrs may appear
17318 * after the basic Ethernet header
17319 */
17320 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
17321 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17322 }
17323 }
17324
17325 ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
17326 ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
17327 ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
17328 ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
17329
17330 if (!CHIP_IS_E1x(sc)) {
17331 /* reset VFC memories */
17332 REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17333 VFC_MEMORIES_RST_REG_CAM_RST |
17334 VFC_MEMORIES_RST_REG_RAM_RST);
17335 REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17336 VFC_MEMORIES_RST_REG_CAM_RST |
17337 VFC_MEMORIES_RST_REG_RAM_RST);
17338
17339 DELAY(20000);
17340 }
17341
17342 ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
17343 ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
17344 ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
17345 ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
17346
17347 /* sync semi rtc */
17348 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
17349 0x80000000);
17350 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
17351 0x80000000);
17352
17353 ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
17354 ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
17355 ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
17356
17357 if (!CHIP_IS_E1x(sc)) {
17358 if (IS_MF_AFEX(sc)) {
17359 /*
17360 * configure that AFEX and VLAN headers must be
17361 * sent in AFEX mode
17362 */
17363 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
17364 REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
17365 REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
17366 REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
17367 REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
17368 } else {
17369 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
17370 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17371 }
17372 }
17373
17374 REG_WR(sc, SRC_REG_SOFT_RST, 1);
17375
17376 ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
17377
17378 if (CNIC_SUPPORT(sc)) {
17379 REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
17380 REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
17381 REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
17382 REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
17383 REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
17384 REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
17385 REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
17386 REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
17387 REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
17388 REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
17389 }
17390 REG_WR(sc, SRC_REG_SOFT_RST, 0);
17391
17392 if (sizeof(union cdu_context) != 1024) {
17393 /* we currently assume that a context is 1024 bytes */
17394 BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
17395 (long)sizeof(union cdu_context));
17396 }
17397
17398 ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
17399 val = (4 << 24) + (0 << 12) + 1024;
17400 REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
17401
17402 ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
17403
17404 REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
17405 /* enable context validation interrupt from CFC */
17406 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
17407
17408 /* set the thresholds to prevent CFC/CDU race */
17409 REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
17410 ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
17411
17412 if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
17413 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
17414 }
17415
17416 ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
17417 ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
17418
17419 /* Reset PCIE errors for debug */
17420 REG_WR(sc, 0x2814, 0xffffffff);
17421 REG_WR(sc, 0x3820, 0xffffffff);
17422
17423 if (!CHIP_IS_E1x(sc)) {
17424 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
17425 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
17426 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
17427 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
17428 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
17429 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
17430 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
17431 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
17432 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
17433 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
17434 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
17435 }
17436
17437 ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
17438
17439 if (!CHIP_IS_E1(sc)) {
17440 /* in E3 this is done in the per-port section */
17441 if (!CHIP_IS_E3(sc))
17442 REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
17443 }
17444
17445 if (CHIP_IS_E1H(sc)) {
17446 /* not applicable for E2 (and above ...) */
17447 REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17448 }
17449
17450 if (CHIP_REV_IS_SLOW(sc)) {
17451 DELAY(200000);
17452 }
17453
17454 /* finish CFC init */
17455 val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17456 if (val != 1) {
17457 BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
17458 return (-1);
17459 }
17460 val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17461 if (val != 1) {
17462 BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
17463 return (-1);
17464 }
17465 val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17466 if (val != 1) {
17467 BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
17468 return (-1);
17469 }
17470 REG_WR(sc, CFC_REG_DEBUG0, 0);
17471
17472 if (CHIP_IS_E1(sc)) {
17473 /* read NIG statistic to see if this is our first up since powerup */
17474 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17475 val = *BXE_SP(sc, wb_data[0]);
17476
17477 /* do internal memory self test */
17478 if ((val == 0) && bxe_int_mem_test(sc)) {
17479 BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
17480 return (-1);
17481 }
17482 }
17483
17484 bxe_setup_fan_failure_detection(sc);
17485
17486 /* clear PXP2 attentions */
17487 REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17488
17489 bxe_enable_blocks_attention(sc);
17490
17491 if (!CHIP_REV_IS_SLOW(sc)) {
17492 ecore_enable_blocks_parity(sc);
17493 }
17494
17495 if (!BXE_NOMCP(sc)) {
17496 if (CHIP_IS_E1x(sc)) {
17497 bxe_common_init_phy(sc);
17498 }
17499 }
17500
17501 return (0);
17502 }
17503
17504 /**
17505 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17506 *
17507 * @sc: driver handle
17508 */
17509 static int
17510 bxe_init_hw_common_chip(struct bxe_softc *sc)
17511 {
17512 int rc = bxe_init_hw_common(sc);
17513
17514 if (rc) {
17515 BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
17516 return (rc);
17517 }
17518
17519 /* In E2 2-PORT mode, same ext phy is used for the two paths */
17520 if (!BXE_NOMCP(sc)) {
17521 bxe_common_init_phy(sc);
17522 }
17523
17524 return (0);
17525 }
17526
17527 static int
17528 bxe_init_hw_port(struct bxe_softc *sc)
17529 {
17530 int port = SC_PORT(sc);
17531 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17532 uint32_t low, high;
17533 uint32_t val;
17534
17535 BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17536
17537 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17538
17539 ecore_init_block(sc, BLOCK_MISC, init_phase);
17540 ecore_init_block(sc, BLOCK_PXP, init_phase);
17541 ecore_init_block(sc, BLOCK_PXP2, init_phase);
17542
17543 /*
17544 * Timers bug workaround: the pf_master bit in pglue is disabled at the
17545 * common phase, so we need to enable it here before any dmae accesses are
17546 * attempted. Therefore we manually added the enable-master to the
17547 * port phase (it also happens in the function phase)
17548 */
17549 if (!CHIP_IS_E1x(sc)) {
17550 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17551 }
17552
17553 ecore_init_block(sc, BLOCK_ATC, init_phase);
17554 ecore_init_block(sc, BLOCK_DMAE, init_phase);
17555 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17556 ecore_init_block(sc, BLOCK_QM, init_phase);
17557
17558 ecore_init_block(sc, BLOCK_TCM, init_phase);
17559 ecore_init_block(sc, BLOCK_UCM, init_phase);
17560 ecore_init_block(sc, BLOCK_CCM, init_phase);
17561 ecore_init_block(sc, BLOCK_XCM, init_phase);
17562
17563 /* QM cid (connection) count */
17564 ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17565
17566 if (CNIC_SUPPORT(sc)) {
17567 ecore_init_block(sc, BLOCK_TM, init_phase);
17568 REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17569 REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17570 }
17571
17572 ecore_init_block(sc, BLOCK_DORQ, init_phase);
17573
17574 ecore_init_block(sc, BLOCK_BRB1, init_phase);
17575
17576 if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17577 if (IS_MF(sc)) {
17578 low = (BXE_ONE_PORT(sc) ? 160 : 246);
17579 } else if (sc->mtu > 4096) {
17580 if (BXE_ONE_PORT(sc)) {
17581 low = 160;
17582 } else {
17583 val = sc->mtu;
17584 /* (24*1024 + val*4)/256 */
17585 low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
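/* worked example, assuming an MTU of 9000: 96 + 9000/64 + 1 = 237,
 * which matches ceil((24*1024 + 9000*4)/256) = ceil(236.625) = 237
 */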
17586 }
17587 } else {
17588 low = (BXE_ONE_PORT(sc) ? 80 : 160);
17589 }
17590 high = (low + 56); /* 14*1024/256 */
17591 REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17592 REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
17593 }
17594
17595 if (CHIP_IS_MODE_4_PORT(sc)) {
17596 REG_WR(sc, SC_PORT(sc) ?
17597 BRB1_REG_MAC_GUARANTIED_1 :
17598 BRB1_REG_MAC_GUARANTIED_0, 40);
17599 }
17600
17601 ecore_init_block(sc, BLOCK_PRS, init_phase);
17602 if (CHIP_IS_E3B0(sc)) {
17603 if (IS_MF_AFEX(sc)) {
17604 /* configure headers for AFEX mode */
17605 REG_WR(sc, SC_PORT(sc) ?
17606 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17607 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17608 REG_WR(sc, SC_PORT(sc) ?
17609 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17610 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17611 REG_WR(sc, SC_PORT(sc) ?
17612 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17613 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17614 } else {
17615 /* Ovlan exists only if we are in multi-function +
17616 * switch-dependent mode; in switch-independent mode there
17617 * are no ovlan headers
17618 */
17619 REG_WR(sc, SC_PORT(sc) ?
17620 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17621 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17622 (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17623 }
17624 }
17625
17626 ecore_init_block(sc, BLOCK_TSDM, init_phase);
17627 ecore_init_block(sc, BLOCK_CSDM, init_phase);
17628 ecore_init_block(sc, BLOCK_USDM, init_phase);
17629 ecore_init_block(sc, BLOCK_XSDM, init_phase);
17630
17631 ecore_init_block(sc, BLOCK_TSEM, init_phase);
17632 ecore_init_block(sc, BLOCK_USEM, init_phase);
17633 ecore_init_block(sc, BLOCK_CSEM, init_phase);
17634 ecore_init_block(sc, BLOCK_XSEM, init_phase);
17635
17636 ecore_init_block(sc, BLOCK_UPB, init_phase);
17637 ecore_init_block(sc, BLOCK_XPB, init_phase);
17638
17639 ecore_init_block(sc, BLOCK_PBF, init_phase);
17640
17641 if (CHIP_IS_E1x(sc)) {
17642 /* configure PBF to work without PAUSE mtu 9000 */
17643 REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
17644
17645 /* update threshold */
17646 REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
17647 /* update init credit */
17648 REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17649
17650 /* probe changes */
17651 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
17652 DELAY(50);
17653 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
17654 }
17655
17656 if (CNIC_SUPPORT(sc)) {
17657 ecore_init_block(sc, BLOCK_SRC, init_phase);
17658 }
17659
17660 ecore_init_block(sc, BLOCK_CDU, init_phase);
17661 ecore_init_block(sc, BLOCK_CFC, init_phase);
17662
17663 if (CHIP_IS_E1(sc)) {
17664 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17665 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17666 }
17667 ecore_init_block(sc, BLOCK_HC, init_phase);
17668
17669 ecore_init_block(sc, BLOCK_IGU, init_phase);
17670
17671 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17672 /* init aeu_mask_attn_func_0/1:
17673 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17674 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17675 * bits 4-7 are used for "per vn group attention" */
17676 val = IS_MF(sc) ? 0xF7 : 0x7;
17677 /* Enable DCBX attention for all but E1 */
17678 val |= CHIP_IS_E1(sc) ? 0 : 0x10;
17679 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
17680
17681 ecore_init_block(sc, BLOCK_NIG, init_phase);
17682
17683 if (!CHIP_IS_E1x(sc)) {
17684 /* Bit-map indicating which L2 hdrs may appear after the
17685 * basic Ethernet header
17686 */
17687 if (IS_MF_AFEX(sc)) {
17688 REG_WR(sc, SC_PORT(sc) ?
17689 NIG_REG_P1_HDRS_AFTER_BASIC :
17690 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
17691 } else {
17692 REG_WR(sc, SC_PORT(sc) ?
17693 NIG_REG_P1_HDRS_AFTER_BASIC :
17694 NIG_REG_P0_HDRS_AFTER_BASIC,
17695 IS_MF_SD(sc) ? 7 : 6);
17696 }
17697
17698 if (CHIP_IS_E3(sc)) {
17699 REG_WR(sc, SC_PORT(sc) ?
17700 NIG_REG_LLH1_MF_MODE :
17701 NIG_REG_LLH_MF_MODE, IS_MF(sc));
17702 }
17703 }
17704 if (!CHIP_IS_E3(sc)) {
17705 REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
17706 }
17707
17708 if (!CHIP_IS_E1(sc)) {
17709 /* 0x2 disable mf_ov, 0x1 enable */
17710 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
17711 (IS_MF_SD(sc) ? 0x1 : 0x2));
17712
17713 if (!CHIP_IS_E1x(sc)) {
17714 val = 0;
17715 switch (sc->devinfo.mf_info.mf_mode) {
17716 case MULTI_FUNCTION_SD:
17717 val = 1;
17718 break;
17719 case MULTI_FUNCTION_SI:
17720 case MULTI_FUNCTION_AFEX:
17721 val = 2;
17722 break;
17723 }
17724
17725 REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
17726 NIG_REG_LLH0_CLS_TYPE), val);
17727 }
17728 REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17729 REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17730 REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17731 }
17732
17733 /* If SPIO5 is set to generate interrupts, enable it for this port */
17734 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17735 if (val & MISC_SPIO_SPIO5) {
17736 uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17737 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17738 val = REG_RD(sc, reg_addr);
17739 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17740 REG_WR(sc, reg_addr, val);
17741 }
17742
17743 return (0);
17744 }
17745
17746 static uint32_t
17747 bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17748 uint32_t reg,
17749 uint32_t expected,
17750 uint32_t poll_count)
17751 {
17752 uint32_t cur_cnt = poll_count;
17753 uint32_t val;
17754
17755 while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17756 DELAY(FLR_WAIT_INTERVAL);
17757 }
17758
17759 return (val);
17760 }
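/*
 * Each failed poll above delays FLR_WAIT_INTERVAL microseconds, so the
 * worst-case wall-clock wait is roughly poll_count * FLR_WAIT_INTERVAL us
 * plus the register read latency.
 */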
17761
17762 static int
17763 bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17764 uint32_t reg,
17765 char *msg,
17766 uint32_t poll_cnt)
17767 {
17768 uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17769
17770 if (val != 0) {
17771 BLOGE(sc, "%s usage count=%d\n", msg, val);
17772 return (1);
17773 }
17774
17775 return (0);
17776 }
17777
17778 /* Common routines with VF FLR cleanup */
17779 static uint32_t
17780 bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17781 {
17782 /* adjust polling timeout */
17783 if (CHIP_REV_IS_EMUL(sc)) {
17784 return (FLR_POLL_CNT * 2000);
17785 }
17786
17787 if (CHIP_REV_IS_FPGA(sc)) {
17788 return (FLR_POLL_CNT * 120);
17789 }
17790
17791 return (FLR_POLL_CNT);
17792 }
17793
17794 static int
17795 bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17796 uint32_t poll_cnt)
17797 {
17798 /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17799 if (bxe_flr_clnup_poll_hw_counter(sc,
17800 CFC_REG_NUM_LCIDS_INSIDE_PF,
17801 "CFC PF usage counter timed out",
17802 poll_cnt)) {
17803 return (1);
17804 }
17805
17806 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17807 if (bxe_flr_clnup_poll_hw_counter(sc,
17808 DORQ_REG_PF_USAGE_CNT,
17809 "DQ PF usage counter timed out",
17810 poll_cnt)) {
17811 return (1);
17812 }
17813
17814 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17815 if (bxe_flr_clnup_poll_hw_counter(sc,
17816 QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17817 "QM PF usage counter timed out",
17818 poll_cnt)) {
17819 return (1);
17820 }
17821
17822 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17823 if (bxe_flr_clnup_poll_hw_counter(sc,
17824 TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17825 "Timers VNIC usage counter timed out",
17826 poll_cnt)) {
17827 return (1);
17828 }
17829
17830 if (bxe_flr_clnup_poll_hw_counter(sc,
17831 TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17832 "Timers NUM_SCANS usage counter timed out",
17833 poll_cnt)) {
17834 return (1);
17835 }
17836
17837 /* Wait DMAE PF usage counter to zero */
17838 if (bxe_flr_clnup_poll_hw_counter(sc,
17839 dmae_reg_go_c[INIT_DMAE_C(sc)],
17840 "DMAE dommand register timed out",
17841 poll_cnt)) {
17842 return (1);
17843 }
17844
17845 return (0);
17846 }
17847
17848 #define OP_GEN_PARAM(param) \
17849 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
17850 #define OP_GEN_TYPE(type) \
17851 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
17852 #define OP_GEN_AGG_VECT(index) \
17853 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
17854
17855 static int
17856 bxe_send_final_clnup(struct bxe_softc *sc,
17857 uint8_t clnup_func,
17858 uint32_t poll_cnt)
17859 {
17860 uint32_t op_gen_command = 0;
17861 uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
17862 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
17863 int ret = 0;
17864
17865 if (REG_RD(sc, comp_addr)) {
17866 BLOGE(sc, "Cleanup complete was not 0 before sending\n");
17867 return (1);
17868 }
17869
17870 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
17871 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
17872 op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
17873 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
17874
17875 BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
17876 REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
17877
17878 if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
17879 BLOGE(sc, "FW final cleanup did not succeed\n");
17880 BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
17881 (REG_RD(sc, comp_addr)));
17882 bxe_panic(sc, ("FLR cleanup failed\n"));
17883 return (1);
17884 }
17885
17886 /* Zero completion for nxt FLR */
17887 REG_WR(sc, comp_addr, 0);
17888
17889 return (ret);
17890 }
17891
17892 static void
17893 bxe_pbf_pN_buf_flushed(struct bxe_softc *sc,
17894 struct pbf_pN_buf_regs *regs,
17895 uint32_t poll_count)
17896 {
17897 uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
17898 uint32_t cur_cnt = poll_count;
17899
17900 crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
17901 crd = crd_start = REG_RD(sc, regs->crd);
17902 init_crd = REG_RD(sc, regs->init_crd);
17903
17904 BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
17905 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd);
17906 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
17907
17908 while ((crd != init_crd) &&
17909 ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
17910 (init_crd - crd_start))) {
17911 if (cur_cnt--) {
17912 DELAY(FLR_WAIT_INTERVAL);
17913 crd = REG_RD(sc, regs->crd);
17914 crd_freed = REG_RD(sc, regs->crd_freed);
17915 } else {
17916 BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
17917 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd);
17918 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
17919 break;
17920 }
17921 }
17922
17923 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
17924 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17925 }
17926
17927 static void
17928 bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc,
17929 struct pbf_pN_cmd_regs *regs,
17930 uint32_t poll_count)
17931 {
17932 uint32_t occup, to_free, freed, freed_start;
17933 uint32_t cur_cnt = poll_count;
17934
17935 occup = to_free = REG_RD(sc, regs->lines_occup);
17936 freed = freed_start = REG_RD(sc, regs->lines_freed);
17937
17938 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
17939 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17940
17941 while (occup &&
17942 ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
17943 if (cur_cnt--) {
17944 DELAY(FLR_WAIT_INTERVAL);
17945 occup = REG_RD(sc, regs->lines_occup);
17946 freed = REG_RD(sc, regs->lines_freed);
17947 } else {
17948 BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
17949 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
17950 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17951 break;
17952 }
17953 }
17954
17955 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
17956 poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17957 }
17958
17959 static void
17960 bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
17961 {
17962 struct pbf_pN_cmd_regs cmd_regs[] = {
17963 {0, (CHIP_IS_E3B0(sc)) ?
17964 PBF_REG_TQ_OCCUPANCY_Q0 :
17965 PBF_REG_P0_TQ_OCCUPANCY,
17966 (CHIP_IS_E3B0(sc)) ?
17967 PBF_REG_TQ_LINES_FREED_CNT_Q0 :
17968 PBF_REG_P0_TQ_LINES_FREED_CNT},
17969 {1, (CHIP_IS_E3B0(sc)) ?
17970 PBF_REG_TQ_OCCUPANCY_Q1 :
17971 PBF_REG_P1_TQ_OCCUPANCY,
17972 (CHIP_IS_E3B0(sc)) ?
17973 PBF_REG_TQ_LINES_FREED_CNT_Q1 :
17974 PBF_REG_P1_TQ_LINES_FREED_CNT},
17975 {4, (CHIP_IS_E3B0(sc)) ?
17976 PBF_REG_TQ_OCCUPANCY_LB_Q :
17977 PBF_REG_P4_TQ_OCCUPANCY,
17978 (CHIP_IS_E3B0(sc)) ?
17979 PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
17980 PBF_REG_P4_TQ_LINES_FREED_CNT}
17981 };
17982
17983 struct pbf_pN_buf_regs buf_regs[] = {
17984 {0, (CHIP_IS_E3B0(sc)) ?
17985 PBF_REG_INIT_CRD_Q0 :
17986 PBF_REG_P0_INIT_CRD ,
17987 (CHIP_IS_E3B0(sc)) ?
17988 PBF_REG_CREDIT_Q0 :
17989 PBF_REG_P0_CREDIT,
17990 (CHIP_IS_E3B0(sc)) ?
17991 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
17992 PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
17993 {1, (CHIP_IS_E3B0(sc)) ?
17994 PBF_REG_INIT_CRD_Q1 :
17995 PBF_REG_P1_INIT_CRD,
17996 (CHIP_IS_E3B0(sc)) ?
17997 PBF_REG_CREDIT_Q1 :
17998 PBF_REG_P1_CREDIT,
17999 (CHIP_IS_E3B0(sc)) ?
18000 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
18001 PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
18002 {4, (CHIP_IS_E3B0(sc)) ?
18003 PBF_REG_INIT_CRD_LB_Q :
18004 PBF_REG_P4_INIT_CRD,
18005 (CHIP_IS_E3B0(sc)) ?
18006 PBF_REG_CREDIT_LB_Q :
18007 PBF_REG_P4_CREDIT,
18008 (CHIP_IS_E3B0(sc)) ?
18009 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
18010 PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
18011 };
18012
18013 int i;
18014
18015 /* Verify the command queues are flushed P0, P1, P4 */
18016 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
18017 bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
18018 }
18019
18020 /* Verify the transmission buffers are flushed P0, P1, P4 */
18021 for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
18022 bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
18023 }
18024 }
18025
18026 static void
18027 bxe_hw_enable_status(struct bxe_softc *sc)
18028 {
18029 uint32_t val;
18030
18031 val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
18032 BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
18033
18034 val = REG_RD(sc, PBF_REG_DISABLE_PF);
18035 BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
18036
18037 val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
18038 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
18039
18040 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
18041 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
18042
18043 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
18044 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
18045
18046 val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
18047 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
18048
18049 val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
18050 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
18051
18052 val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
18053 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
18054 }
18055
18056 static int
18057 bxe_pf_flr_clnup(struct bxe_softc *sc)
18058 {
18059 uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
18060
18061 BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
18062
18063 /* Re-enable PF target read access */
18064 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
18065
18066 /* Poll HW usage counters */
18067 BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
18068 if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
18069 return (-1);
18070 }
18071
18072 /* Zero the igu 'trailing edge' and 'leading edge' */
18073
18074 /* Send the FW cleanup command */
18075 if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
18076 return (-1);
18077 }
18078
18079 /* ATC cleanup */
18080
18081 /* Verify TX hw is flushed */
18082 bxe_tx_hw_flushed(sc, poll_cnt);
18083
18084 /* Wait 100ms (not adjusted according to platform) */
18085 DELAY(100000);
18086
18087 /* Verify no pending pci transactions */
18088 if (bxe_is_pcie_pending(sc)) {
18089 BLOGE(sc, "PCIE Transactions still pending\n");
18090 }
18091
18092 /* Debug */
18093 bxe_hw_enable_status(sc);
18094
18095 /*
18096 * Master enable - Due to WB DMAE writes performed before this
18097 * register is re-initialized as part of the regular function init
18098 */
18099 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
18100
18101 return (0);
18102 }
18103
18104 static int
18105 bxe_init_hw_func(struct bxe_softc *sc)
18106 {
18107 int port = SC_PORT(sc);
18108 int func = SC_FUNC(sc);
18109 int init_phase = PHASE_PF0 + func;
18110 struct ecore_ilt *ilt = sc->ilt;
18111 uint16_t cdu_ilt_start;
18112 uint32_t addr, val;
18113 uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
18114 int i, main_mem_width, rc;
18115
18116 BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
18117
18118 /* FLR cleanup */
18119 if (!CHIP_IS_E1x(sc)) {
18120 rc = bxe_pf_flr_clnup(sc);
18121 if (rc) {
18122 BLOGE(sc, "FLR cleanup failed!\n");
18123 // XXX bxe_fw_dump(sc);
18124 // XXX bxe_idle_chk(sc);
18125 return (rc);
18126 }
18127 }
18128
18129 /* set MSI reconfigure capability */
18130 if (sc->devinfo.int_block == INT_BLOCK_HC) {
18131 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
18132 val = REG_RD(sc, addr);
18133 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
18134 REG_WR(sc, addr, val);
18135 }
18136
18137 ecore_init_block(sc, BLOCK_PXP, init_phase);
18138 ecore_init_block(sc, BLOCK_PXP2, init_phase);
18139
18140 ilt = sc->ilt;
18141 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
18142
18143 for (i = 0; i < L2_ILT_LINES(sc); i++) {
18144 ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
18145 ilt->lines[cdu_ilt_start + i].page_mapping =
18146 sc->context[i].vcxt_dma.paddr;
18147 ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
18148 }
18149 ecore_ilt_init_op(sc, INITOP_SET);
18150
18151 /* Set NIC mode */
18152 REG_WR(sc, PRS_REG_NIC_MODE, 1);
18153 BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
18154
18155 if (!CHIP_IS_E1x(sc)) {
18156 uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
18157
18158         /* Turn on a single ISR mode in the IGU if the driver is going to
18159          * use INT#x or MSI
18160          */
18161 if (sc->interrupt_mode != INTR_MODE_MSIX) {
18162 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
18163 }
18164
18165 /*
18166          * Timers bug workaround: function init part.
18167          * Wait 20 msec after initializing the ILT to make sure there
18168          * are no requests left in any of the PXP internal queues that
18169          * still reference "old" ILT addresses.
18170 */
18171 DELAY(20000);
18172
18173 /*
18174 * Master enable - Due to WB DMAE writes performed before this
18175 * register is re-initialized as part of the regular function
18176 * init
18177 */
18178 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
18179 /* Enable the function in IGU */
18180 REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
18181 }
18182
18183 sc->dmae_ready = 1;
18184
18185 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
18186
18187 if (!CHIP_IS_E1x(sc))
18188 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
18189
18190 ecore_init_block(sc, BLOCK_ATC, init_phase);
18191 ecore_init_block(sc, BLOCK_DMAE, init_phase);
18192 ecore_init_block(sc, BLOCK_NIG, init_phase);
18193 ecore_init_block(sc, BLOCK_SRC, init_phase);
18194 ecore_init_block(sc, BLOCK_MISC, init_phase);
18195 ecore_init_block(sc, BLOCK_TCM, init_phase);
18196 ecore_init_block(sc, BLOCK_UCM, init_phase);
18197 ecore_init_block(sc, BLOCK_CCM, init_phase);
18198 ecore_init_block(sc, BLOCK_XCM, init_phase);
18199 ecore_init_block(sc, BLOCK_TSEM, init_phase);
18200 ecore_init_block(sc, BLOCK_USEM, init_phase);
18201 ecore_init_block(sc, BLOCK_CSEM, init_phase);
18202 ecore_init_block(sc, BLOCK_XSEM, init_phase);
18203
18204 if (!CHIP_IS_E1x(sc))
18205 REG_WR(sc, QM_REG_PF_EN, 1);
18206
18207 if (!CHIP_IS_E1x(sc)) {
18208 REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18209 REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18210 REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18211 REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18212 }
18213 ecore_init_block(sc, BLOCK_QM, init_phase);
18214
18215 ecore_init_block(sc, BLOCK_TM, init_phase);
18216 ecore_init_block(sc, BLOCK_DORQ, init_phase);
18217
18218 bxe_iov_init_dq(sc);
18219
18220 ecore_init_block(sc, BLOCK_BRB1, init_phase);
18221 ecore_init_block(sc, BLOCK_PRS, init_phase);
18222 ecore_init_block(sc, BLOCK_TSDM, init_phase);
18223 ecore_init_block(sc, BLOCK_CSDM, init_phase);
18224 ecore_init_block(sc, BLOCK_USDM, init_phase);
18225 ecore_init_block(sc, BLOCK_XSDM, init_phase);
18226 ecore_init_block(sc, BLOCK_UPB, init_phase);
18227 ecore_init_block(sc, BLOCK_XPB, init_phase);
18228 ecore_init_block(sc, BLOCK_PBF, init_phase);
18229 if (!CHIP_IS_E1x(sc))
18230 REG_WR(sc, PBF_REG_DISABLE_PF, 0);
18231
18232 ecore_init_block(sc, BLOCK_CDU, init_phase);
18233
18234 ecore_init_block(sc, BLOCK_CFC, init_phase);
18235
18236 if (!CHIP_IS_E1x(sc))
18237 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
18238
18239 if (IS_MF(sc)) {
18240 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
18241 REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
18242 }
18243
18244 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
18245
18246 /* HC init per function */
18247 if (sc->devinfo.int_block == INT_BLOCK_HC) {
18248 if (CHIP_IS_E1H(sc)) {
18249 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18250
18251 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18252 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18253 }
18254 ecore_init_block(sc, BLOCK_HC, init_phase);
18255
18256 } else {
18257 int num_segs, sb_idx, prod_offset;
18258
18259 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18260
18261 if (!CHIP_IS_E1x(sc)) {
18262 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18263 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18264 }
18265
18266 ecore_init_block(sc, BLOCK_IGU, init_phase);
18267
18268 if (!CHIP_IS_E1x(sc)) {
18269 int dsb_idx = 0;
18270 /**
18271 * Producer memory:
18272              * E2 mode: addresses 0-135 map to the mapping memory;
18273 * 136 - PF0 default prod; 137 - PF1 default prod;
18274 * 138 - PF2 default prod; 139 - PF3 default prod;
18275 * 140 - PF0 attn prod; 141 - PF1 attn prod;
18276 * 142 - PF2 attn prod; 143 - PF3 attn prod;
18277 * 144-147 reserved.
18278 *
18279              * E1.5 mode - in backward compatible mode:
18280              * for non-default SBs, each even line in the memory
18281              * holds the U producer and each odd line holds
18282 * the C producer. The first 128 producers are for
18283 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
18284 * producers are for the DSB for each PF.
18285 * Each PF has five segments: (the order inside each
18286 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
18287 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
18288 * 144-147 attn prods;
18289 */
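            /*
             * For example, non-default SB 'sb_idx' of this PF owns the
             * num_segs producer lines starting at
             * (igu_base_sb + sb_idx) * num_segs, which is exactly the
             * prod_offset computed in the loop below.
             */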
18290 /* non-default-status-blocks */
18291 num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18292 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
18293 for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
18294 prod_offset = (sc->igu_base_sb + sb_idx) *
18295 num_segs;
18296
18297 for (i = 0; i < num_segs; i++) {
18298 addr = IGU_REG_PROD_CONS_MEMORY +
18299 (prod_offset + i) * 4;
18300 REG_WR(sc, addr, 0);
18301 }
18302 /* send consumer update with value 0 */
18303 bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
18304 USTORM_ID, 0, IGU_INT_NOP, 1);
18305 bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
18306 }
18307
18308 /* default-status-blocks */
18309 num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18310 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
18311
18312 if (CHIP_IS_MODE_4_PORT(sc))
18313 dsb_idx = SC_FUNC(sc);
18314 else
18315 dsb_idx = SC_VN(sc);
18316
18317 prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
18318 IGU_BC_BASE_DSB_PROD + dsb_idx :
18319 IGU_NORM_BASE_DSB_PROD + dsb_idx);
18320
18321 /*
18322              * IGU producers come in chunks of E1HVN_MAX (4);
18323              * it does not matter what the current chip mode is
18324 */
18325 for (i = 0; i < (num_segs * E1HVN_MAX);
18326 i += E1HVN_MAX) {
18327 addr = IGU_REG_PROD_CONS_MEMORY +
18328 (prod_offset + i)*4;
18329 REG_WR(sc, addr, 0);
18330 }
18331 /* send consumer update with 0 */
18332 if (CHIP_INT_MODE_IS_BC(sc)) {
18333 bxe_ack_sb(sc, sc->igu_dsb_id,
18334 USTORM_ID, 0, IGU_INT_NOP, 1);
18335 bxe_ack_sb(sc, sc->igu_dsb_id,
18336 CSTORM_ID, 0, IGU_INT_NOP, 1);
18337 bxe_ack_sb(sc, sc->igu_dsb_id,
18338 XSTORM_ID, 0, IGU_INT_NOP, 1);
18339 bxe_ack_sb(sc, sc->igu_dsb_id,
18340 TSTORM_ID, 0, IGU_INT_NOP, 1);
18341 bxe_ack_sb(sc, sc->igu_dsb_id,
18342 ATTENTION_ID, 0, IGU_INT_NOP, 1);
18343 } else {
18344 bxe_ack_sb(sc, sc->igu_dsb_id,
18345 USTORM_ID, 0, IGU_INT_NOP, 1);
18346 bxe_ack_sb(sc, sc->igu_dsb_id,
18347 ATTENTION_ID, 0, IGU_INT_NOP, 1);
18348 }
18349 bxe_igu_clear_sb(sc, sc->igu_dsb_id);
18350
18351 /* !!! these should become driver const once
18352 rf-tool supports split-68 const */
18353 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
18354 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
18355 REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
18356 REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
18357 REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
18358 REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
18359 }
18360 }
18361
18362 /* Reset PCIE errors for debug */
18363 REG_WR(sc, 0x2114, 0xffffffff);
18364 REG_WR(sc, 0x2120, 0xffffffff);
18365
18366 if (CHIP_IS_E1x(sc)) {
18367 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
18368 main_mem_base = HC_REG_MAIN_MEMORY +
18369 SC_PORT(sc) * (main_mem_size * 4);
18370 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
18371 main_mem_width = 8;
18372
18373 val = REG_RD(sc, main_mem_prty_clr);
18374 if (val) {
18375 BLOGD(sc, DBG_LOAD,
18376 "Parity errors in HC block during function init (0x%x)!\n",
18377 val);
18378 }
18379
18380 /* Clear "false" parity errors in MSI-X table */
18381 for (i = main_mem_base;
18382 i < main_mem_base + main_mem_size * 4;
18383 i += main_mem_width) {
18384 bxe_read_dmae(sc, i, main_mem_width / 4);
18385 bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
18386 i, main_mem_width / 4);
18387 }
18388 /* Clear HC parity attention */
18389 REG_RD(sc, main_mem_prty_clr);
18390 }
18391
18392 #if 1
18393 /* Enable STORMs SP logging */
18394 REG_WR8(sc, BAR_USTRORM_INTMEM +
18395 USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18396 REG_WR8(sc, BAR_TSTRORM_INTMEM +
18397 TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18398 REG_WR8(sc, BAR_CSTRORM_INTMEM +
18399 CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18400 REG_WR8(sc, BAR_XSTRORM_INTMEM +
18401 XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18402 #endif
18403
18404 elink_phy_probe(&sc->link_params);
18405
18406 return (0);
18407 }
18408
18409 static void
18410 bxe_link_reset(struct bxe_softc *sc)
18411 {
18412 if (!BXE_NOMCP(sc)) {
18413 bxe_acquire_phy_lock(sc);
18414 elink_lfa_reset(&sc->link_params, &sc->link_vars);
18415 bxe_release_phy_lock(sc);
18416 } else {
18417 if (!CHIP_REV_IS_SLOW(sc)) {
18418 BLOGW(sc, "Bootcode is missing - cannot reset link\n");
18419 }
18420 }
18421 }
18422
18423 static void
18424 bxe_reset_port(struct bxe_softc *sc)
18425 {
18426 int port = SC_PORT(sc);
18427 uint32_t val;
18428
18429 ELINK_DEBUG_P0(sc, "bxe_reset_port called\n");
18430 /* reset physical Link */
18431 bxe_link_reset(sc);
18432
18433 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
18434
18435 /* Do not rcv packets to BRB */
18436 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
18437 /* Do not direct rcv packets that are not for MCP to the BRB */
18438 REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
18439 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
18440
18441 /* Configure AEU */
18442 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
18443
18444 DELAY(100000);
18445
18446 /* Check for BRB port occupancy */
18447 val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
18448 if (val) {
18449 BLOGD(sc, DBG_LOAD,
18450 "BRB1 is not empty, %d blocks are occupied\n", val);
18451 }
18452
18453 /* TODO: Close Doorbell port? */
18454 }
18455
18456 static void
18457 bxe_ilt_wr(struct bxe_softc *sc,
18458 uint32_t index,
18459 bus_addr_t addr)
18460 {
18461 int reg;
18462 uint32_t wb_write[2];
18463
18464 if (CHIP_IS_E1(sc)) {
18465 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
18466 } else {
18467 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
18468 }
18469
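    /*
     * Convert the bus address into the two 32-bit words expected by the
     * PXP2 on-chip address table (via ONCHIP_ADDR1/ONCHIP_ADDR2) and write
     * them with a single wide-bus DMAE transaction.
     */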
18470 wb_write[0] = ONCHIP_ADDR1(addr);
18471 wb_write[1] = ONCHIP_ADDR2(addr);
18472 REG_WR_DMAE(sc, reg, wb_write, 2);
18473 }
18474
18475 static void
18476 bxe_clear_func_ilt(struct bxe_softc *sc,
18477 uint32_t func)
18478 {
18479 uint32_t i, base = FUNC_ILT_BASE(func);
18480 for (i = base; i < base + ILT_PER_FUNC; i++) {
18481 bxe_ilt_wr(sc, i, 0);
18482 }
18483 }
18484
18485 static void
18486 bxe_reset_func(struct bxe_softc *sc)
18487 {
18488 struct bxe_fastpath *fp;
18489 int port = SC_PORT(sc);
18490 int func = SC_FUNC(sc);
18491 int i;
18492
18493 /* Disable the function in the FW */
18494 REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
18495 REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
18496 REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
18497 REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
18498
18499 /* FP SBs */
18500 FOR_EACH_ETH_QUEUE(sc, i) {
18501 fp = &sc->fp[i];
18502 REG_WR8(sc, BAR_CSTRORM_INTMEM +
18503 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
18504 SB_DISABLED);
18505 }
18506
18507 /* SP SB */
18508 REG_WR8(sc, BAR_CSTRORM_INTMEM +
18509 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
18510 SB_DISABLED);
18511
18512 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
18513 REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
18514 }
18515
18516 /* Configure IGU */
18517 if (sc->devinfo.int_block == INT_BLOCK_HC) {
18518 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18519 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18520 } else {
18521 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18522 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18523 }
18524
18525 if (CNIC_LOADED(sc)) {
18526 /* Disable Timer scan */
18527 REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
18528 /*
18529 * Wait for at least 10ms and up to 2 second for the timers
18530 * scan to complete
18531 */
18532 for (i = 0; i < 200; i++) {
18533 DELAY(10000);
18534 if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
18535 break;
18536 }
18537 }
18538
18539 /* Clear ILT */
18540 bxe_clear_func_ilt(sc, func);
18541
18542 /*
18543      * Timers bug workaround for E2: if this is vnic-3,
18544      * we need to set the entire ILT range for the timers (TM) client.
18545 */
18546 if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
18547 struct ilt_client_info ilt_cli;
18548 /* use dummy TM client */
18549 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
18550 ilt_cli.start = 0;
18551 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18552 ilt_cli.client_num = ILT_CLIENT_TM;
18553
18554 ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
18555 }
18556
18557 /* this assumes that reset_port() called before reset_func()*/
18558 if (!CHIP_IS_E1x(sc)) {
18559 bxe_pf_disable(sc);
18560 }
18561
18562 sc->dmae_ready = 0;
18563 }
18564
18565 static int
18566 bxe_gunzip_init(struct bxe_softc *sc)
18567 {
18568 return (0);
18569 }
18570
18571 static void
18572 bxe_gunzip_end(struct bxe_softc *sc)
18573 {
18574 return;
18575 }
18576
18577 static int
18578 bxe_init_firmware(struct bxe_softc *sc)
18579 {
18580 if (CHIP_IS_E1(sc)) {
18581 ecore_init_e1_firmware(sc);
18582 sc->iro_array = e1_iro_arr;
18583 } else if (CHIP_IS_E1H(sc)) {
18584 ecore_init_e1h_firmware(sc);
18585 sc->iro_array = e1h_iro_arr;
18586 } else if (!CHIP_IS_E1x(sc)) {
18587 ecore_init_e2_firmware(sc);
18588 sc->iro_array = e2_iro_arr;
18589 } else {
18590 BLOGE(sc, "Unsupported chip revision\n");
18591 return (-1);
18592 }
18593
18594 return (0);
18595 }
18596
18597 static void
18598 bxe_release_firmware(struct bxe_softc *sc)
18599 {
18600 /* Do nothing */
18601 return;
18602 }
18603
18604 static int
18605 ecore_gunzip(struct bxe_softc *sc,
18606 const uint8_t *zbuf,
18607 int len)
18608 {
18609 /* XXX : Implement... */
18610 BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
18611 return (FALSE);
18612 }
18613
18614 static void
18615 ecore_reg_wr_ind(struct bxe_softc *sc,
18616 uint32_t addr,
18617 uint32_t val)
18618 {
18619 bxe_reg_wr_ind(sc, addr, val);
18620 }
18621
18622 static void
18623 ecore_write_dmae_phys_len(struct bxe_softc *sc,
18624 bus_addr_t phys_addr,
18625 uint32_t addr,
18626 uint32_t len)
18627 {
18628 bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
18629 }
18630
18631 void
18632 ecore_storm_memset_struct(struct bxe_softc *sc,
18633 uint32_t addr,
18634 size_t size,
18635 uint32_t *data)
18636 {
18637 uint8_t i;
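    /*
     * Copy the structure into STORM internal memory one dword at a time;
     * 'size' is in bytes and is assumed to be a multiple of 4.
     */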
18638 for (i = 0; i < size/4; i++) {
18639 REG_WR(sc, addr + (i * 4), data[i]);
18640 }
18641 }
18642
18643
18644 /*
18645 * character device - ioctl interface definitions
18646 */
18647
18648
18649 #include "bxe_dump.h"
18650 #include "bxe_ioctl.h"
18651 #include <sys/conf.h>
18652
18653 static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18654 struct thread *td);
18655
18656 static struct cdevsw bxe_cdevsw = {
18657 .d_version = D_VERSION,
18658 .d_ioctl = bxe_eioctl,
18659 .d_name = "bxecnic",
18660 };
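
/*
 * Userland usage sketch (illustrative only): a management tool would open
 * the node created by bxe_add_cdev() (named after the interface, e.g.
 * /dev/bxe0) and drive the ioctls handled by bxe_eioctl() below, roughly:
 *
 *     bxe_grcdump_t dump = { 0 };
 *     int fd = open("/dev/bxe0", O_RDWR);       // node name is an example
 *     ioctl(fd, BXE_GRC_DUMP_SIZE, &dump);      // learn the required size
 *     dump.grcdump = malloc(dump.grcdump_size);
 *     ioctl(fd, BXE_GRC_DUMP, &dump);           // collect the register dump
 *
 * BXE_GRC_DUMP only proceeds once the driver's grcdump trigger has been
 * set; the exact request codes and the bxe_grcdump_t layout come from
 * bxe_ioctl.h.
 */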
18661
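/*
 * Dump path selection: E1x devices have a single path (0); on later chips
 * the path is taken from the low bit of the PCI function number.
 */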
18662 #define BXE_PATH(sc) (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
18663
18664
18665 #define DUMP_ALL_PRESETS 0x1FFF
18666 #define DUMP_MAX_PRESETS 13
18667 #define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
18668 #define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
18669 #define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
18670 #define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
18671 #define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
18672
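/* Presets are numbered from 1, so preset 'idx' corresponds to bit (idx-1)
 * in a register's preset mask. */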
18673 #define IS_REG_IN_PRESET(presets, idx) \
18674 ((presets & (1 << (idx-1))) == (1 << (idx-1)))
18675
18676
18677 static int
18678 bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
18679 {
18680 if (CHIP_IS_E1(sc))
18681 return dump_num_registers[0][preset-1];
18682 else if (CHIP_IS_E1H(sc))
18683 return dump_num_registers[1][preset-1];
18684 else if (CHIP_IS_E2(sc))
18685 return dump_num_registers[2][preset-1];
18686 else if (CHIP_IS_E3A0(sc))
18687 return dump_num_registers[3][preset-1];
18688 else if (CHIP_IS_E3B0(sc))
18689 return dump_num_registers[4][preset-1];
18690 else
18691 return 0;
18692 }
18693
18694 static int
18695 bxe_get_total_regs_len32(struct bxe_softc *sc)
18696 {
18697 uint32_t preset_idx;
18698 int regdump_len32 = 0;
18699
18700
18701 /* Calculate the total preset regs length */
18702 for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18703 regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx);
18704 }
18705
18706 return regdump_len32;
18707 }
18708
18709 static const uint32_t *
18710 __bxe_get_page_addr_ar(struct bxe_softc *sc)
18711 {
18712 if (CHIP_IS_E2(sc))
18713 return page_vals_e2;
18714 else if (CHIP_IS_E3(sc))
18715 return page_vals_e3;
18716 else
18717 return NULL;
18718 }
18719
18720 static uint32_t
18721 __bxe_get_page_reg_num(struct bxe_softc *sc)
18722 {
18723 if (CHIP_IS_E2(sc))
18724 return PAGE_MODE_VALUES_E2;
18725 else if (CHIP_IS_E3(sc))
18726 return PAGE_MODE_VALUES_E3;
18727 else
18728 return 0;
18729 }
18730
18731 static const uint32_t *
18732 __bxe_get_page_write_ar(struct bxe_softc *sc)
18733 {
18734 if (CHIP_IS_E2(sc))
18735 return page_write_regs_e2;
18736 else if (CHIP_IS_E3(sc))
18737 return page_write_regs_e3;
18738 else
18739 return NULL;
18740 }
18741
18742 static uint32_t
18743 __bxe_get_page_write_num(struct bxe_softc *sc)
18744 {
18745 if (CHIP_IS_E2(sc))
18746 return PAGE_WRITE_REGS_E2;
18747 else if (CHIP_IS_E3(sc))
18748 return PAGE_WRITE_REGS_E3;
18749 else
18750 return 0;
18751 }
18752
18753 static const struct reg_addr *
18754 __bxe_get_page_read_ar(struct bxe_softc *sc)
18755 {
18756 if (CHIP_IS_E2(sc))
18757 return page_read_regs_e2;
18758 else if (CHIP_IS_E3(sc))
18759 return page_read_regs_e3;
18760 else
18761 return NULL;
18762 }
18763
18764 static uint32_t
18765 __bxe_get_page_read_num(struct bxe_softc *sc)
18766 {
18767 if (CHIP_IS_E2(sc))
18768 return PAGE_READ_REGS_E2;
18769 else if (CHIP_IS_E3(sc))
18770 return PAGE_READ_REGS_E3;
18771 else
18772 return 0;
18773 }
18774
18775 static bool
18776 bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
18777 {
18778 if (CHIP_IS_E1(sc))
18779 return IS_E1_REG(reg_info->chips);
18780 else if (CHIP_IS_E1H(sc))
18781 return IS_E1H_REG(reg_info->chips);
18782 else if (CHIP_IS_E2(sc))
18783 return IS_E2_REG(reg_info->chips);
18784 else if (CHIP_IS_E3A0(sc))
18785 return IS_E3A0_REG(reg_info->chips);
18786 else if (CHIP_IS_E3B0(sc))
18787 return IS_E3B0_REG(reg_info->chips);
18788 else
18789 return 0;
18790 }
18791
18792 static bool
18793 bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
18794 {
18795 if (CHIP_IS_E1(sc))
18796 return IS_E1_REG(wreg_info->chips);
18797 else if (CHIP_IS_E1H(sc))
18798 return IS_E1H_REG(wreg_info->chips);
18799 else if (CHIP_IS_E2(sc))
18800 return IS_E2_REG(wreg_info->chips);
18801 else if (CHIP_IS_E3A0(sc))
18802 return IS_E3A0_REG(wreg_info->chips);
18803 else if (CHIP_IS_E3B0(sc))
18804 return IS_E3B0_REG(wreg_info->chips);
18805 else
18806 return 0;
18807 }
18808
18809 /**
18810 * bxe_read_pages_regs - read "paged" registers
18811 *
18812  *  @sc device handle
18813 * @p output buffer
18814 *
18815 * Reads "paged" memories: memories that may only be read by first writing to a
18816 * specific address ("write address") and then reading from a specific address
18817 * ("read address"). There may be more than one write address per "page" and
18818 * more than one read address per write address.
18819 */
18820 static void
18821 bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18822 {
18823 uint32_t i, j, k, n;
18824
18825 /* addresses of the paged registers */
18826 const uint32_t *page_addr = __bxe_get_page_addr_ar(sc);
18827 /* number of paged registers */
18828 int num_pages = __bxe_get_page_reg_num(sc);
18829 /* write addresses */
18830 const uint32_t *write_addr = __bxe_get_page_write_ar(sc);
18831 /* number of write addresses */
18832 int write_num = __bxe_get_page_write_num(sc);
18833 /* read addresses info */
18834 const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc);
18835 /* number of read addresses */
18836 int read_num = __bxe_get_page_read_num(sc);
18837 uint32_t addr, size;
18838
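    /*
     * For every page value: latch it into each write address, then copy out
     * all read addresses that belong to the requested preset.
     */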
18839 for (i = 0; i < num_pages; i++) {
18840 for (j = 0; j < write_num; j++) {
18841 REG_WR(sc, write_addr[j], page_addr[i]);
18842
18843 for (k = 0; k < read_num; k++) {
18844 if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) {
18845 size = read_addr[k].size;
18846 for (n = 0; n < size; n++) {
18847 addr = read_addr[k].addr + n*4;
18848 *p++ = REG_RD(sc, addr);
18849 }
18850 }
18851 }
18852 }
18853 }
18854 return;
18855 }
18856
18857
18858 static int
18859 bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18860 {
18861 uint32_t i, j, addr;
18862 const struct wreg_addr *wreg_addr_p = NULL;
18863
18864 if (CHIP_IS_E1(sc))
18865 wreg_addr_p = &wreg_addr_e1;
18866 else if (CHIP_IS_E1H(sc))
18867 wreg_addr_p = &wreg_addr_e1h;
18868 else if (CHIP_IS_E2(sc))
18869 wreg_addr_p = &wreg_addr_e2;
18870 else if (CHIP_IS_E3A0(sc))
18871 wreg_addr_p = &wreg_addr_e3;
18872 else if (CHIP_IS_E3B0(sc))
18873 wreg_addr_p = &wreg_addr_e3b0;
18874 else
18875 return (-1);
18876
18877 /* Read the idle_chk registers */
18878 for (i = 0; i < IDLE_REGS_COUNT; i++) {
18879 if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
18880 IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
18881 for (j = 0; j < idle_reg_addrs[i].size; j++)
18882 *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
18883 }
18884 }
18885
18886 /* Read the regular registers */
18887 for (i = 0; i < REGS_COUNT; i++) {
18888         if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
18889 IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
18890 for (j = 0; j < reg_addrs[i].size; j++)
18891 *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
18892 }
18893 }
18894
18895 /* Read the CAM registers */
18896 if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
18897 IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
18898 for (i = 0; i < wreg_addr_p->size; i++) {
18899 *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);
18900
18901 /* In case of wreg_addr register, read additional
18902 registers from read_regs array
18903 */
18904 for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
18905 addr = *(wreg_addr_p->read_regs);
18906 *p++ = REG_RD(sc, addr + j*4);
18907 }
18908 }
18909 }
18910
18911 /* Paged registers are supported in E2 & E3 only */
18912 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
18913 /* Read "paged" registers */
18914 bxe_read_pages_regs(sc, p, preset);
18915 }
18916
18917 return 0;
18918 }
18919
18920 int
18921 bxe_grc_dump(struct bxe_softc *sc)
18922 {
18923 int rval = 0;
18924 uint32_t preset_idx;
18925 uint8_t *buf;
18926 uint32_t size;
18927 struct dump_header *d_hdr;
18928 uint32_t i;
18929 uint32_t reg_val;
18930 uint32_t reg_addr;
18931 uint32_t cmd_offset;
18932 struct ecore_ilt *ilt = SC_ILT(sc);
18933 struct bxe_fastpath *fp;
18934 struct ilt_client_info *ilt_cli;
18935 int grc_dump_size;
18936
18937
18938 if (sc->grcdump_done || sc->grcdump_started)
18939 return (rval);
18940
18941 sc->grcdump_started = 1;
18942 BLOGI(sc, "Started collecting grcdump\n");
18943
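    /*
     * The dump buffer holds a dump_header followed by the raw dwords of
     * every preset collected below (presets that require IOR access are
     * skipped).
     */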
18944 grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18945 sizeof(struct dump_header);
18946
18947 sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);
18948
18949 if (sc->grc_dump == NULL) {
18950 BLOGW(sc, "Unable to allocate memory for grcdump collection\n");
18951 return(ENOMEM);
18952 }
18953
18954
18955
18956     /* Disable parity attentions while the dump runs, since reading
18957      * registers that were never written can raise false alarms. Parity
18958      * attentions are re-enabled right after the dump.
18959      */
18960
18961 /* Disable parity on path 0 */
18962 bxe_pretend_func(sc, 0);
18963
18964 ecore_disable_blocks_parity(sc);
18965
18966 /* Disable parity on path 1 */
18967 bxe_pretend_func(sc, 1);
18968 ecore_disable_blocks_parity(sc);
18969
18970 /* Return to current function */
18971 bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18972
18973 buf = sc->grc_dump;
18974 d_hdr = sc->grc_dump;
18975
18976 d_hdr->header_size = (sizeof(struct dump_header) >> 2) - 1;
18977 d_hdr->version = BNX2X_DUMP_VERSION;
18978 d_hdr->preset = DUMP_ALL_PRESETS;
18979
18980 if (CHIP_IS_E1(sc)) {
18981 d_hdr->dump_meta_data = DUMP_CHIP_E1;
18982 } else if (CHIP_IS_E1H(sc)) {
18983 d_hdr->dump_meta_data = DUMP_CHIP_E1H;
18984 } else if (CHIP_IS_E2(sc)) {
18985 d_hdr->dump_meta_data = DUMP_CHIP_E2 |
18986 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18987 } else if (CHIP_IS_E3A0(sc)) {
18988 d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
18989 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18990 } else if (CHIP_IS_E3B0(sc)) {
18991 d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
18992 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18993 }
18994
18995 buf += sizeof(struct dump_header);
18996
18997 for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18998
18999 /* Skip presets with IOR */
19000 if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
19001 (preset_idx == 11))
19002 continue;
19003
19004 rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);
19005
19006 if (rval)
19007 break;
19008
19009 size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t));
19010
19011 buf += size;
19012 }
19013
19014 bxe_pretend_func(sc, 0);
19015 ecore_clear_blocks_parity(sc);
19016 ecore_enable_blocks_parity(sc);
19017
19018 bxe_pretend_func(sc, 1);
19019 ecore_clear_blocks_parity(sc);
19020 ecore_enable_blocks_parity(sc);
19021
19022 /* Return to current function */
19023 bxe_pretend_func(sc, SC_ABS_FUNC(sc));
19024
19025
19026
19027 if(sc->state == BXE_STATE_OPEN) {
19028 if(sc->fw_stats_req != NULL) {
19029 BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n",
19030 (uintmax_t)sc->fw_stats_req_mapping,
19031 (uintmax_t)sc->fw_stats_data_mapping,
19032 sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
19033 }
19034 if(sc->def_sb != NULL) {
19035 BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n",
19036 (void *)sc->def_sb_dma.paddr, sc->def_sb,
19037 sizeof(struct host_sp_status_block));
19038 }
19039 if(sc->eq_dma.vaddr != NULL) {
19040 BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n",
19041 (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
19042 }
19043 if(sc->sp_dma.vaddr != NULL) {
19044 BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n",
19045 (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr,
19046 sizeof(struct bxe_slowpath));
19047 }
19048 if(sc->spq_dma.vaddr != NULL) {
19049 BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n",
19050 (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
19051 }
19052 if(sc->gz_buf_dma.vaddr != NULL) {
19053 BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n",
19054 (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr,
19055 FW_BUF_SIZE);
19056 }
19057 for (i = 0; i < sc->num_queues; i++) {
19058 fp = &sc->fp[i];
19059 if(fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL &&
19060 fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL &&
19061 fp->rx_sge_dma.vaddr != NULL) {
19062
19063 BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
19064 (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr,
19065 sizeof(union bxe_host_hc_status_block));
19066 BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
19067 (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr,
19068 (BCM_PAGE_SIZE * TX_BD_NUM_PAGES));
19069 BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
19070 (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr,
19071 (BCM_PAGE_SIZE * RX_BD_NUM_PAGES));
19072 BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
19073 (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
19074 (BCM_PAGE_SIZE * RCQ_NUM_PAGES));
19075 BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
19076 (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
19077 (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES));
19078 }
19079 }
19080 if(ilt != NULL ) {
19081 ilt_cli = &ilt->clients[1];
19082 if(ilt->lines != NULL) {
19083 for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
19084 BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n",
19085 (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr),
19086 ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE);
19087 }
19088 }
19089 }
19090
19091
19092 cmd_offset = DMAE_REG_CMD_MEM;
19093 for (i = 0; i < 224; i++) {
19094 reg_addr = (cmd_offset +(i * 4));
19095 reg_val = REG_RD(sc, reg_addr);
19096 BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n",i,
19097 reg_addr, reg_val);
19098 }
19099 }
19100
19101 BLOGI(sc, "Collection of grcdump done\n");
19102 sc->grcdump_done = 1;
19103 return(rval);
19104 }
19105
19106 static int
19107 bxe_add_cdev(struct bxe_softc *sc)
19108 {
19109 sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT);
19110
19111 if (sc->eeprom == NULL) {
19112         BLOGW(sc, "Unable to allocate eeprom buffer\n");
19113 return (-1);
19114 }
19115
19116 sc->ioctl_dev = make_dev(&bxe_cdevsw,
19117 sc->ifp->if_dunit,
19118 UID_ROOT,
19119 GID_WHEEL,
19120 0600,
19121 "%s",
19122 if_name(sc->ifp));
19123
19124 if (sc->ioctl_dev == NULL) {
19125 free(sc->eeprom, M_DEVBUF);
19126 sc->eeprom = NULL;
19127 return (-1);
19128 }
19129
19130 sc->ioctl_dev->si_drv1 = sc;
19131
19132 return (0);
19133 }
19134
19135 static void
19136 bxe_del_cdev(struct bxe_softc *sc)
19137 {
19138 if (sc->ioctl_dev != NULL)
19139 destroy_dev(sc->ioctl_dev);
19140
19141 if (sc->eeprom != NULL) {
19142 free(sc->eeprom, M_DEVBUF);
19143 sc->eeprom = NULL;
19144 }
19145 sc->ioctl_dev = NULL;
19146
19147 return;
19148 }
19149
19150 static bool bxe_is_nvram_accessible(struct bxe_softc *sc)
19151 {
19152
19153 if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
19154 return FALSE;
19155
19156 return TRUE;
19157 }
19158
19159
19160 static int
19161 bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
19162 {
19163 int rval = 0;
19164
19165 if(!bxe_is_nvram_accessible(sc)) {
19166 BLOGW(sc, "Cannot access eeprom when interface is down\n");
19167 return (-EAGAIN);
19168 }
19169 rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len);
19170
19171
19172 return (rval);
19173 }
19174
19175 static int
19176 bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
19177 {
19178 int rval = 0;
19179
19180 if(!bxe_is_nvram_accessible(sc)) {
19181 BLOGW(sc, "Cannot access eeprom when interface is down\n");
19182 return (-EAGAIN);
19183 }
19184 rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len);
19185
19186 return (rval);
19187 }
19188
19189 static int
19190 bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom)
19191 {
19192 int rval = 0;
19193
19194 switch (eeprom->eeprom_cmd) {
19195
19196 case BXE_EEPROM_CMD_SET_EEPROM:
19197
19198 rval = copyin(eeprom->eeprom_data, sc->eeprom,
19199 eeprom->eeprom_data_len);
19200
19201 if (rval)
19202 break;
19203
19204 rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
19205 eeprom->eeprom_data_len);
19206 break;
19207
19208 case BXE_EEPROM_CMD_GET_EEPROM:
19209
19210 rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
19211 eeprom->eeprom_data_len);
19212
19213 if (rval) {
19214 break;
19215 }
19216
19217 rval = copyout(sc->eeprom, eeprom->eeprom_data,
19218 eeprom->eeprom_data_len);
19219 break;
19220
19221 default:
19222 rval = EINVAL;
19223 break;
19224 }
19225
19226 if (rval) {
19227 BLOGW(sc, "ioctl cmd %d failed rval %d\n", eeprom->eeprom_cmd, rval);
19228 }
19229
19230 return (rval);
19231 }
19232
19233 static int
19234 bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p)
19235 {
19236 uint32_t ext_phy_config;
19237 int port = SC_PORT(sc);
19238 int cfg_idx = bxe_get_link_cfg_idx(sc);
19239
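    /*
     * Report capabilities from both link configurations: the alternate
     * config (cfg_idx ^ 1) contributes only its TP/FIBRE media bits, so a
     * dual-media port advertises both media types.
     */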
19240 dev_p->supported = sc->port.supported[cfg_idx] |
19241 (sc->port.supported[cfg_idx ^ 1] &
19242 (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE));
19243 dev_p->advertising = sc->port.advertising[cfg_idx];
19244 if(sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type ==
19245 ELINK_ETH_PHY_SFP_1G_FIBER) {
19246         dev_p->supported &= ~(ELINK_SUPPORTED_10000baseT_Full);
19247 dev_p->advertising &= ~(ADVERTISED_10000baseT_Full);
19248 }
19249 if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up &&
19250 !(sc->flags & BXE_MF_FUNC_DIS)) {
19251 dev_p->duplex = sc->link_vars.duplex;
19252 if (IS_MF(sc) && !BXE_NOMCP(sc))
19253 dev_p->speed = bxe_get_mf_speed(sc);
19254 else
19255 dev_p->speed = sc->link_vars.line_speed;
19256 } else {
19257 dev_p->duplex = DUPLEX_UNKNOWN;
19258 dev_p->speed = SPEED_UNKNOWN;
19259 }
19260
19261 dev_p->port = bxe_media_detect(sc);
19262
19263 ext_phy_config = SHMEM_RD(sc,
19264 dev_info.port_hw_config[port].external_phy_config);
19265 if((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) ==
19266 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
19267 dev_p->phy_address = sc->port.phy_addr;
19268 else if(((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
19269 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
19270 ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
19271 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
19272 dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config);
19273 else
19274 dev_p->phy_address = 0;
19275
19276 if(sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG)
19277 dev_p->autoneg = AUTONEG_ENABLE;
19278 else
19279 dev_p->autoneg = AUTONEG_DISABLE;
19280
19281
19282 return 0;
19283 }
19284
19285 static int
19286 bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
19287 struct thread *td)
19288 {
19289 struct bxe_softc *sc;
19290 int rval = 0;
19291 device_t pci_dev;
19292 bxe_grcdump_t *dump = NULL;
19293 int grc_dump_size;
19294 bxe_drvinfo_t *drv_infop = NULL;
19295 bxe_dev_setting_t *dev_p;
19296 bxe_dev_setting_t dev_set;
19297 bxe_get_regs_t *reg_p;
19298 bxe_reg_rdw_t *reg_rdw_p;
19299 bxe_pcicfg_rdw_t *cfg_rdw_p;
19300 bxe_perm_mac_addr_t *mac_addr_p;
19301
19302
19303 if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
19304 return ENXIO;
19305
19306 pci_dev= sc->dev;
19307
19308 dump = (bxe_grcdump_t *)data;
19309
19310 switch(cmd) {
19311
19312 case BXE_GRC_DUMP_SIZE:
19313 dump->pci_func = sc->pcie_func;
19314 dump->grcdump_size =
19315 (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19316 sizeof(struct dump_header);
19317 break;
19318
19319 case BXE_GRC_DUMP:
19320
19321 grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19322 sizeof(struct dump_header);
19323 if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) ||
19324 (dump->grcdump_size < grc_dump_size)) {
19325 rval = EINVAL;
19326 break;
19327 }
19328
19329 if((sc->trigger_grcdump) && (!sc->grcdump_done) &&
19330 (!sc->grcdump_started)) {
19331 rval = bxe_grc_dump(sc);
19332 }
19333
19334 if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) &&
19335 (sc->grc_dump != NULL)) {
19336 dump->grcdump_dwords = grc_dump_size >> 2;
19337 rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
19338 free(sc->grc_dump, M_DEVBUF);
19339 sc->grc_dump = NULL;
19340 sc->grcdump_started = 0;
19341 sc->grcdump_done = 0;
19342 }
19343
19344 break;
19345
19346 case BXE_DRV_INFO:
19347 drv_infop = (bxe_drvinfo_t *)data;
19348 snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe");
19349 snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s",
19350 BXE_DRIVER_VERSION);
19351 snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s",
19352 sc->devinfo.bc_ver_str);
19353 snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH,
19354 "%s", sc->fw_ver_str);
19355 drv_infop->eeprom_dump_len = sc->devinfo.flash_size;
19356 drv_infop->reg_dump_len =
19357 (bxe_get_total_regs_len32(sc) * sizeof(uint32_t))
19358 + sizeof(struct dump_header);
19359 snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d",
19360 sc->pcie_bus, sc->pcie_device, sc->pcie_func);
19361 break;
19362
19363 case BXE_DEV_SETTING:
19364 dev_p = (bxe_dev_setting_t *)data;
19365 bxe_get_settings(sc, &dev_set);
19366 dev_p->supported = dev_set.supported;
19367 dev_p->advertising = dev_set.advertising;
19368 dev_p->speed = dev_set.speed;
19369 dev_p->duplex = dev_set.duplex;
19370 dev_p->port = dev_set.port;
19371 dev_p->phy_address = dev_set.phy_address;
19372 dev_p->autoneg = dev_set.autoneg;
19373
19374 break;
19375
19376 case BXE_GET_REGS:
19377
19378 reg_p = (bxe_get_regs_t *)data;
19379 grc_dump_size = reg_p->reg_buf_len;
19380
19381 if((!sc->grcdump_done) && (!sc->grcdump_started)) {
19382 bxe_grc_dump(sc);
19383 }
19384 if((sc->grcdump_done) && (sc->grcdump_started) &&
19385 (sc->grc_dump != NULL)) {
19386 rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size);
19387 free(sc->grc_dump, M_DEVBUF);
19388 sc->grc_dump = NULL;
19389 sc->grcdump_started = 0;
19390 sc->grcdump_done = 0;
19391 }
19392
19393 break;
19394
19395 case BXE_RDW_REG:
19396 reg_rdw_p = (bxe_reg_rdw_t *)data;
19397 if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) &&
19398 (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19399 reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id);
19400
19401 if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) &&
19402 (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19403 REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val);
19404
19405 break;
19406
19407 case BXE_RDW_PCICFG:
19408 cfg_rdw_p = (bxe_pcicfg_rdw_t *)data;
19409 if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) {
19410
19411 cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id,
19412 cfg_rdw_p->cfg_width);
19413
19414 } else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) {
19415 pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val,
19416 cfg_rdw_p->cfg_width);
19417 } else {
19418 BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n");
19419 }
19420 break;
19421
19422 case BXE_MAC_ADDR:
19423 mac_addr_p = (bxe_perm_mac_addr_t *)data;
19424 snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s",
19425 sc->mac_addr_str);
19426 break;
19427
19428 case BXE_EEPROM:
19429 rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data);
19430 break;
19431
19432
19433 default:
19434 break;
19435 }
19436
19437 return (rval);
19438 }
19439
19440 #ifdef DEBUGNET
19441 static void
19442 bxe_debugnet_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
19443 {
19444 struct bxe_softc *sc;
19445
19446 sc = if_getsoftc(ifp);
19447 BXE_CORE_LOCK(sc);
19448 *nrxr = sc->num_queues;
19449 *ncl = DEBUGNET_MAX_IN_FLIGHT;
19450 *clsize = sc->fp[0].mbuf_alloc_size;
19451 BXE_CORE_UNLOCK(sc);
19452 }
19453
19454 static void
19455 bxe_debugnet_event(struct ifnet *ifp __unused, enum debugnet_ev event __unused)
19456 {
19457 }
19458
19459 static int
19460 bxe_debugnet_transmit(struct ifnet *ifp, struct mbuf *m)
19461 {
19462 struct bxe_softc *sc;
19463 int error;
19464
19465 sc = if_getsoftc(ifp);
19466 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
19467 IFF_DRV_RUNNING || !sc->link_vars.link_up)
19468 return (ENOENT);
19469
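    /*
     * Debugnet transmits on the first fastpath queue only; completions are
     * reaped by the bxe_txeof() call in bxe_debugnet_poll().
     */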
19470 error = bxe_tx_encap(&sc->fp[0], &m);
19471 if (error != 0 && m != NULL)
19472 m_freem(m);
19473 return (error);
19474 }
19475
19476 static int
19477 bxe_debugnet_poll(struct ifnet *ifp, int count)
19478 {
19479 struct bxe_softc *sc;
19480 int i;
19481
19482 sc = if_getsoftc(ifp);
19483 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
19484 !sc->link_vars.link_up)
19485 return (ENOENT);
19486
19487 for (i = 0; i < sc->num_queues; i++)
19488 (void)bxe_rxeof(sc, &sc->fp[i]);
19489 (void)bxe_txeof(sc, &sc->fp[0]);
19490 return (0);
19491 }
19492 #endif /* DEBUGNET */
19493