1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2013-2016 Qlogic Corporation
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 * and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 /*
31 * File: ql_os.c
32 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
33 */
34
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37
38
39 #include "ql_os.h"
40 #include "ql_hw.h"
41 #include "ql_def.h"
42 #include "ql_inline.h"
43 #include "ql_ver.h"
44 #include "ql_glbl.h"
45 #include "ql_dbg.h"
46 #include <sys/smp.h>
47
48 /*
49 * Some PCI Configuration Space Related Defines
50 */
51
52 #ifndef PCI_VENDOR_QLOGIC
53 #define PCI_VENDOR_QLOGIC 0x1077
54 #endif
55
56 #ifndef PCI_PRODUCT_QLOGIC_ISP8030
57 #define PCI_PRODUCT_QLOGIC_ISP8030 0x8030
58 #endif
59
60 #define PCI_QLOGIC_ISP8030 \
61 ((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)
62
63 /*
64 * static functions
65 */
66 static int qla_alloc_parent_dma_tag(qla_host_t *ha);
67 static void qla_free_parent_dma_tag(qla_host_t *ha);
68 static int qla_alloc_xmt_bufs(qla_host_t *ha);
69 static void qla_free_xmt_bufs(qla_host_t *ha);
70 static int qla_alloc_rcv_bufs(qla_host_t *ha);
71 static void qla_free_rcv_bufs(qla_host_t *ha);
72 static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);
73
74 static void qla_init_ifnet(device_t dev, qla_host_t *ha);
75 static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
76 static void qla_release(qla_host_t *ha);
77 static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
78 int error);
79 static void qla_stop(qla_host_t *ha);
80 static void qla_get_peer(qla_host_t *ha);
81 static void qla_error_recovery(void *context, int pending);
82 static void qla_async_event(void *context, int pending);
83 static void qla_stats(void *context, int pending);
84 static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
85 uint32_t iscsi_pdu);
86
87 /*
88 * Hooks to the Operating Systems
89 */
90 static int qla_pci_probe (device_t);
91 static int qla_pci_attach (device_t);
92 static int qla_pci_detach (device_t);
93
94 static void qla_init(void *arg);
95 static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
96 static int qla_media_change(struct ifnet *ifp);
97 static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
98
99 static int qla_transmit(struct ifnet *ifp, struct mbuf *mp);
100 static void qla_qflush(struct ifnet *ifp);
101 static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
102 static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
103 static int qla_create_fp_taskqueues(qla_host_t *ha);
104 static void qla_destroy_fp_taskqueues(qla_host_t *ha);
105 static void qla_drain_fp_taskqueues(qla_host_t *ha);
106
107 static device_method_t qla_pci_methods[] = {
108 /* Device interface */
109 DEVMETHOD(device_probe, qla_pci_probe),
110 DEVMETHOD(device_attach, qla_pci_attach),
111 DEVMETHOD(device_detach, qla_pci_detach),
112 { 0, 0 }
113 };
114
115 static driver_t qla_pci_driver = {
116 "ql", qla_pci_methods, sizeof (qla_host_t),
117 };
118
119 static devclass_t qla83xx_devclass;
120
121 DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0);
122
123 MODULE_DEPEND(qla83xx, pci, 1, 1, 1);
124 MODULE_DEPEND(qla83xx, ether, 1, 1, 1);
125
126 MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");
127
128 #define QL_STD_REPLENISH_THRES 0
129 #define QL_JUMBO_REPLENISH_THRES 32
130
131
132 static char dev_str[64];
133 static char ver_str[64];
134
135 /*
136 * Name: qla_pci_probe
137 * Function: Validate the PCI device to be a QLA83xx device
138 */
139 static int
140 qla_pci_probe(device_t dev)
141 {
142 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
143 case PCI_QLOGIC_ISP8030:
144 snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
145 "Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
146 QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
147 QLA_VERSION_BUILD);
148 snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
149 QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
150 QLA_VERSION_BUILD);
151 device_set_desc(dev, dev_str);
152 break;
153 default:
154 return (ENXIO);
155 }
156
157 if (bootverbose)
158 printf("%s: %s\n ", __func__, dev_str);
159
160 return (BUS_PROBE_DEFAULT);
161 }
162
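/*
 * Name: qla_add_sysctls
 * Function: Registers the per-device sysctl nodes: driver/firmware version,
 *	link status, debug level, error recovery tunables and a few
 *	transmit/receive statistics counters
 */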
163 static void
164 qla_add_sysctls(qla_host_t *ha)
165 {
166 device_t dev = ha->pci_dev;
167
168 SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
169 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
170 OID_AUTO, "version", CTLFLAG_RD,
171 ver_str, 0, "Driver Version");
172
173 SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
174 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
175 OID_AUTO, "fw_version", CTLFLAG_RD,
176 ha->fw_ver_str, 0, "firmware version");
177
178 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
179 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
180 OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
181 (void *)ha, 0,
182 qla_sysctl_get_link_status, "I", "Link Status");
183
184 ha->dbg_level = 0;
185 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
186 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
187 OID_AUTO, "debug", CTLFLAG_RW,
188 &ha->dbg_level, ha->dbg_level, "Debug Level");
189
190 ha->enable_minidump = 1;
191 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
192 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
193 OID_AUTO, "enable_minidump", CTLFLAG_RW,
194 &ha->enable_minidump, ha->enable_minidump,
195 "Minidump retrival prior to error recovery "
196 "is enabled only when this is set");
197
198 ha->enable_driverstate_dump = 1;
199 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
200 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
201 OID_AUTO, "enable_driverstate_dump", CTLFLAG_RW,
202 &ha->enable_driverstate_dump, ha->enable_driverstate_dump,
203 "Driver State retrival prior to error recovery "
204 "is enabled only when this is set");
205
206 ha->enable_error_recovery = 1;
207 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
208 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
209 OID_AUTO, "enable_error_recovery", CTLFLAG_RW,
210 &ha->enable_error_recovery, ha->enable_error_recovery,
211 "when set error recovery is enabled on fatal errors "
212 "otherwise the port is turned offline");
213
214 ha->ms_delay_after_init = 1000;
215 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
216 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
217 OID_AUTO, "ms_delay_after_init", CTLFLAG_RW,
218 &ha->ms_delay_after_init, ha->ms_delay_after_init,
219 "millisecond delay after hw_init");
220
221 ha->std_replenish = QL_STD_REPLENISH_THRES;
222 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
223 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
224 OID_AUTO, "std_replenish", CTLFLAG_RW,
225 &ha->std_replenish, ha->std_replenish,
226 "Threshold for Replenishing Standard Frames");
227
228 SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
229 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
230 OID_AUTO, "ipv4_lro",
231 CTLFLAG_RD, &ha->ipv4_lro,
232 "number of ipv4 lro completions");
233
234 SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
235 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
236 OID_AUTO, "ipv6_lro",
237 CTLFLAG_RD, &ha->ipv6_lro,
238 "number of ipv6 lro completions");
239
240 SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
241 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
242 OID_AUTO, "tx_tso_frames",
243 CTLFLAG_RD, &ha->tx_tso_frames,
244 "number of Tx TSO Frames");
245
246 SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
247 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
248 OID_AUTO, "hw_vlan_tx_frames",
249 CTLFLAG_RD, &ha->hw_vlan_tx_frames,
250 "number of Tx VLAN Frames");
251
252 SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
253 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
254 OID_AUTO, "hw_lock_failed",
255 CTLFLAG_RD, &ha->hw_lock_failed,
256 "number of hw_lock failures");
257
258 return;
259 }
260
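/*
 * Name: qla_watchdog
 * Function: Periodic callout; checks adapter health and, on a fatal error
 *	or a peer reset request, schedules the error recovery task (or marks
 *	the port offline).  Otherwise it periodically kicks the statistics
 *	and async event tasks and re-arms itself.
 */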
261 static void
262 qla_watchdog(void *arg)
263 {
264 qla_host_t *ha = arg;
265 qla_hw_t *hw;
266 struct ifnet *ifp;
267
268 hw = &ha->hw;
269 ifp = ha->ifp;
270
271 if (ha->qla_watchdog_exit) {
272 ha->qla_watchdog_exited = 1;
273 return;
274 }
275 ha->qla_watchdog_exited = 0;
276
277 if (!ha->qla_watchdog_pause) {
278 if (!ha->offline &&
279 (ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
280 (ha->msg_from_peer == QL_PEER_MSG_RESET))) {
281
282 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
283 ql_update_link_state(ha);
284
285 if (ha->enable_error_recovery) {
286 ha->qla_watchdog_paused = 1;
287 ha->qla_watchdog_pause = 1;
288 ha->err_inject = 0;
289 device_printf(ha->pci_dev,
290 "%s: taskqueue_enqueue(err_task) \n",
291 __func__);
292 taskqueue_enqueue(ha->err_tq, &ha->err_task);
293 } else {
294 if (ifp != NULL)
295 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
296 ha->offline = 1;
297 }
298 return;
299
300 } else {
301 if (ha->qla_interface_up) {
302
303 ha->watchdog_ticks++;
304
305 if (ha->watchdog_ticks > 1000)
306 ha->watchdog_ticks = 0;
307
308 if (!ha->watchdog_ticks && QL_RUNNING(ifp)) {
309 taskqueue_enqueue(ha->stats_tq,
310 &ha->stats_task);
311 }
312
313 if (ha->async_event) {
314 taskqueue_enqueue(ha->async_event_tq,
315 &ha->async_event_task);
316 }
317
318 }
319 ha->qla_watchdog_paused = 0;
320 }
321 } else {
322 ha->qla_watchdog_paused = 1;
323 }
324
325 callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
326 qla_watchdog, ha);
327 }
328
329 /*
330 * Name: qla_pci_attach
331 * Function: attaches the device to the operating system
332 */
333 static int
334 qla_pci_attach(device_t dev)
335 {
336 qla_host_t *ha = NULL;
337 uint32_t rsrc_len;
338 int i;
339 uint32_t num_rcvq = 0;
340
341 if ((ha = device_get_softc(dev)) == NULL) {
342 device_printf(dev, "cannot get softc\n");
343 return (ENOMEM);
344 }
345
346 memset(ha, 0, sizeof (qla_host_t));
347
348 if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
349 device_printf(dev, "device is not ISP8030\n");
350 return (ENXIO);
351 }
352
353 ha->pci_func = pci_get_function(dev) & 0x1;
354
355 ha->pci_dev = dev;
356
357 pci_enable_busmaster(dev);
358
359 ha->reg_rid = PCIR_BAR(0);
360 ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
361 RF_ACTIVE);
362
363 if (ha->pci_reg == NULL) {
364 device_printf(dev, "unable to map any ports\n");
365 goto qla_pci_attach_err;
366 }
367
368 rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
369 ha->reg_rid);
370
371 mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
372 mtx_init(&ha->sp_log_lock, "qla83xx_sp_log_lock", MTX_NETWORK_LOCK, MTX_DEF);
373 ha->flags.lock_init = 1;
374
375 qla_add_sysctls(ha);
376
377 ha->hw.num_sds_rings = MAX_SDS_RINGS;
378 ha->hw.num_rds_rings = MAX_RDS_RINGS;
379 ha->hw.num_tx_rings = NUM_TX_RINGS;
380
381 ha->reg_rid1 = PCIR_BAR(2);
382 ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
383 &ha->reg_rid1, RF_ACTIVE);
384
385 ha->msix_count = pci_msix_count(dev);
386
387 if (ha->msix_count < 1 ) {
388 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
389 ha->msix_count);
390 goto qla_pci_attach_err;
391 }
392
393 if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
394 ha->hw.num_sds_rings = ha->msix_count - 1;
395 }
396
397 QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
398 " msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha,
399 ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg,
400 ha->pci_reg1));
401
402 /* initialize hardware */
403 if (ql_init_hw(ha)) {
404 device_printf(dev, "%s: ql_init_hw failed\n", __func__);
405 goto qla_pci_attach_err;
406 }
407
408 device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
409 ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
410 ha->fw_ver_build);
411 snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
412 ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
413 ha->fw_ver_build);
414
415 if (qla_get_nic_partition(ha, NULL, &num_rcvq)) {
416 device_printf(dev, "%s: qla_get_nic_partition failed\n",
417 __func__);
418 goto qla_pci_attach_err;
419 }
420 device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
421 " msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n",
422 __func__, ha, ha->pci_func, rsrc_len, ha->msix_count,
423 ha->pci_reg, ha->pci_reg1, num_rcvq);
424
425 if ((ha->msix_count < 64) || (num_rcvq != 32)) {
426 if (ha->hw.num_sds_rings > 15) {
427 ha->hw.num_sds_rings = 15;
428 }
429 }
430
431 ha->hw.num_rds_rings = ha->hw.num_sds_rings;
432 ha->hw.num_tx_rings = ha->hw.num_sds_rings;
433
434 #ifdef QL_ENABLE_ISCSI_TLV
435 ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2;
436 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
437
438 ql_hw_add_sysctls(ha);
439
440 ha->msix_count = ha->hw.num_sds_rings + 1;
441
442 if (pci_alloc_msix(dev, &ha->msix_count)) {
443 device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__,
444 ha->msix_count);
445 ha->msix_count = 0;
446 goto qla_pci_attach_err;
447 }
448
449 ha->mbx_irq_rid = 1;
450 ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
451 &ha->mbx_irq_rid,
452 (RF_ACTIVE | RF_SHAREABLE));
453 if (ha->mbx_irq == NULL) {
454 device_printf(dev, "could not allocate mbx interrupt\n");
455 goto qla_pci_attach_err;
456 }
457 if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
458 NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
459 device_printf(dev, "could not setup mbx interrupt\n");
460 goto qla_pci_attach_err;
461 }
462
463 for (i = 0; i < ha->hw.num_sds_rings; i++) {
464 ha->irq_vec[i].sds_idx = i;
465 ha->irq_vec[i].ha = ha;
466 ha->irq_vec[i].irq_rid = 2 + i;
467
468 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
469 &ha->irq_vec[i].irq_rid,
470 (RF_ACTIVE | RF_SHAREABLE));
471
472 if (ha->irq_vec[i].irq == NULL) {
473 device_printf(dev, "could not allocate interrupt\n");
474 goto qla_pci_attach_err;
475 }
476 if (bus_setup_intr(dev, ha->irq_vec[i].irq,
477 (INTR_TYPE_NET | INTR_MPSAFE),
478 NULL, ql_isr, &ha->irq_vec[i],
479 &ha->irq_vec[i].handle)) {
480 device_printf(dev, "could not setup interrupt\n");
481 goto qla_pci_attach_err;
482 }
483
484 ha->tx_fp[i].ha = ha;
485 ha->tx_fp[i].txr_idx = i;
486
487 if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) {
488 device_printf(dev, "%s: could not allocate tx_br[%d]\n",
489 __func__, i);
490 goto qla_pci_attach_err;
491 }
492 }
493
494 if (qla_create_fp_taskqueues(ha) != 0)
495 goto qla_pci_attach_err;
496
497 printf("%s: mp__ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
498 ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);
499
500 ql_read_mac_addr(ha);
501
502 /* allocate parent dma tag */
503 if (qla_alloc_parent_dma_tag(ha)) {
504 device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
505 __func__);
506 goto qla_pci_attach_err;
507 }
508
509 /* alloc all dma buffers */
510 if (ql_alloc_dma(ha)) {
511 device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
512 goto qla_pci_attach_err;
513 }
514 qla_get_peer(ha);
515
516 if (ql_minidump_init(ha) != 0) {
517 device_printf(dev, "%s: ql_minidump_init failed\n", __func__);
518 goto qla_pci_attach_err;
519 }
520 ql_alloc_drvr_state_buffer(ha);
521 ql_alloc_sp_log_buffer(ha);
522 /* create the o.s ethernet interface */
523 qla_init_ifnet(dev, ha);
524
525 ha->flags.qla_watchdog_active = 1;
526 ha->qla_watchdog_pause = 0;
527
528 callout_init(&ha->tx_callout, TRUE);
529 ha->flags.qla_callout_init = 1;
530
531 /* create ioctl device interface */
532 if (ql_make_cdev(ha)) {
533 device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
534 goto qla_pci_attach_err;
535 }
536
537 callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
538 qla_watchdog, ha);
539
540 TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
541 ha->err_tq = taskqueue_create("qla_errq", M_NOWAIT,
542 taskqueue_thread_enqueue, &ha->err_tq);
543 taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
544 device_get_nameunit(ha->pci_dev));
545
546 TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha);
547 ha->async_event_tq = taskqueue_create("qla_asyncq", M_NOWAIT,
548 taskqueue_thread_enqueue, &ha->async_event_tq);
549 taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq",
550 device_get_nameunit(ha->pci_dev));
551
552 TASK_INIT(&ha->stats_task, 0, qla_stats, ha);
553 ha->stats_tq = taskqueue_create("qla_statsq", M_NOWAIT,
554 taskqueue_thread_enqueue, &ha->stats_tq);
555 taskqueue_start_threads(&ha->stats_tq, 1, PI_NET, "%s taskq",
556 device_get_nameunit(ha->pci_dev));
557
558 QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
559 return (0);
560
561 qla_pci_attach_err:
562
563 qla_release(ha);
564
565 if (ha->flags.lock_init) {
566 mtx_destroy(&ha->hw_lock);
567 mtx_destroy(&ha->sp_log_lock);
568 }
569
570 QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
571 return (ENXIO);
572 }
573
574 /*
575 * Name: qla_pci_detach
576 * Function: Unhooks the device from the operating system
577 */
578 static int
579 qla_pci_detach(device_t dev)
580 {
581 qla_host_t *ha = NULL;
582 struct ifnet *ifp;
583
584
585 if ((ha = device_get_softc(dev)) == NULL) {
586 device_printf(dev, "cannot get softc\n");
587 return (ENOMEM);
588 }
589
590 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
591
592 ifp = ha->ifp;
593
594 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
595 QLA_LOCK(ha, __func__, -1, 0);
596
597 ha->qla_detach_active = 1;
598 qla_stop(ha);
599
600 qla_release(ha);
601
602 QLA_UNLOCK(ha, __func__);
603
604 if (ha->flags.lock_init) {
605 mtx_destroy(&ha->hw_lock);
606 mtx_destroy(&ha->sp_log_lock);
607 }
608
609 QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));
610
611 return (0);
612 }
613
614 /*
615 * SYSCTL Related Callbacks
616 */
617 static int
618 qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
619 {
620 int err, ret = 0;
621 qla_host_t *ha;
622
623 err = sysctl_handle_int(oidp, &ret, 0, req);
624
625 if (err || !req->newptr)
626 return (err);
627
628 if (ret == 1) {
629 ha = (qla_host_t *)arg1;
630 ql_hw_link_status(ha);
631 }
632 return (err);
633 }
634
635 /*
636 * Name: qla_release
637 * Function: Releases the resources allocated for the device
638 */
639 static void
640 qla_release(qla_host_t *ha)
641 {
642 device_t dev;
643 int i;
644
645 dev = ha->pci_dev;
646
647 if (ha->async_event_tq) {
648 taskqueue_drain_all(ha->async_event_tq);
649 taskqueue_free(ha->async_event_tq);
650 }
651
652 if (ha->err_tq) {
653 taskqueue_drain_all(ha->err_tq);
654 taskqueue_free(ha->err_tq);
655 }
656
657 if (ha->stats_tq) {
658 taskqueue_drain_all(ha->stats_tq);
659 taskqueue_free(ha->stats_tq);
660 }
661
662 ql_del_cdev(ha);
663
664 if (ha->flags.qla_watchdog_active) {
665 ha->qla_watchdog_exit = 1;
666
667 while (ha->qla_watchdog_exited == 0)
668 qla_mdelay(__func__, 1);
669 }
670
671 if (ha->flags.qla_callout_init)
672 callout_stop(&ha->tx_callout);
673
674 if (ha->ifp != NULL)
675 ether_ifdetach(ha->ifp);
676
677 ql_free_drvr_state_buffer(ha);
678 ql_free_sp_log_buffer(ha);
679 ql_free_dma(ha);
680 qla_free_parent_dma_tag(ha);
681
682 if (ha->mbx_handle)
683 (void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);
684
685 if (ha->mbx_irq)
686 (void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
687 ha->mbx_irq);
688
689 for (i = 0; i < ha->hw.num_sds_rings; i++) {
690
691 if (ha->irq_vec[i].handle) {
692 (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
693 ha->irq_vec[i].handle);
694 }
695
696 if (ha->irq_vec[i].irq) {
697 (void)bus_release_resource(dev, SYS_RES_IRQ,
698 ha->irq_vec[i].irq_rid,
699 ha->irq_vec[i].irq);
700 }
701
702 qla_free_tx_br(ha, &ha->tx_fp[i]);
703 }
704 qla_destroy_fp_taskqueues(ha);
705
706 if (ha->msix_count)
707 pci_release_msi(dev);
708
709 if (ha->pci_reg)
710 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
711 ha->pci_reg);
712
713 if (ha->pci_reg1)
714 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
715 ha->pci_reg1);
716
717 return;
718 }
719
720 /*
721 * DMA Related Functions
722 */
723
724 static void
725 qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
726 {
727 *((bus_addr_t *)arg) = 0;
728
729 if (error) {
730 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
731 return;
732 }
733
734 *((bus_addr_t *)arg) = segs[0].ds_addr;
735
736 return;
737 }
738
739 int
740 ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
741 {
742 int ret = 0;
743 device_t dev;
744 bus_addr_t b_addr;
745
746 dev = ha->pci_dev;
747
748 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
749
750 ret = bus_dma_tag_create(
751 ha->parent_tag,/* parent */
752 dma_buf->alignment,
753 ((bus_size_t)(1ULL << 32)),/* boundary */
754 BUS_SPACE_MAXADDR, /* lowaddr */
755 BUS_SPACE_MAXADDR, /* highaddr */
756 NULL, NULL, /* filter, filterarg */
757 dma_buf->size, /* maxsize */
758 1, /* nsegments */
759 dma_buf->size, /* maxsegsize */
760 0, /* flags */
761 NULL, NULL, /* lockfunc, lockarg */
762 &dma_buf->dma_tag);
763
764 if (ret) {
765 device_printf(dev, "%s: could not create dma tag\n", __func__);
766 goto ql_alloc_dmabuf_exit;
767 }
768 ret = bus_dmamem_alloc(dma_buf->dma_tag,
769 (void **)&dma_buf->dma_b,
770 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
771 &dma_buf->dma_map);
772 if (ret) {
773 bus_dma_tag_destroy(dma_buf->dma_tag);
774 device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
775 goto ql_alloc_dmabuf_exit;
776 }
777
778 ret = bus_dmamap_load(dma_buf->dma_tag,
779 dma_buf->dma_map,
780 dma_buf->dma_b,
781 dma_buf->size,
782 qla_dmamap_callback,
783 &b_addr, BUS_DMA_NOWAIT);
784
785 if (ret || !b_addr) {
786 bus_dma_tag_destroy(dma_buf->dma_tag);
787 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
788 dma_buf->dma_map);
789 ret = -1;
790 goto ql_alloc_dmabuf_exit;
791 }
792
793 dma_buf->dma_addr = b_addr;
794
795 ql_alloc_dmabuf_exit:
796 QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
797 __func__, ret, (void *)dma_buf->dma_tag,
798 (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
799 dma_buf->size));
800
801 return ret;
802 }
803
804 void
805 ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
806 {
807 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
808 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
809 bus_dma_tag_destroy(dma_buf->dma_tag);
810 }
811
812 static int
813 qla_alloc_parent_dma_tag(qla_host_t *ha)
814 {
815 int ret;
816 device_t dev;
817
818 dev = ha->pci_dev;
819
820 /*
821 * Allocate parent DMA Tag
822 */
823 ret = bus_dma_tag_create(
824 bus_get_dma_tag(dev), /* parent */
825 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
826 BUS_SPACE_MAXADDR, /* lowaddr */
827 BUS_SPACE_MAXADDR, /* highaddr */
828 NULL, NULL, /* filter, filterarg */
829 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
830 0, /* nsegments */
831 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
832 0, /* flags */
833 NULL, NULL, /* lockfunc, lockarg */
834 &ha->parent_tag);
835
836 if (ret) {
837 device_printf(dev, "%s: could not create parent dma tag\n",
838 __func__);
839 return (-1);
840 }
841
842 ha->flags.parent_tag = 1;
843
844 return (0);
845 }
846
847 static void
848 qla_free_parent_dma_tag(qla_host_t *ha)
849 {
850 if (ha->flags.parent_tag) {
851 bus_dma_tag_destroy(ha->parent_tag);
852 ha->flags.parent_tag = 0;
853 }
854 }
855
856 /*
857 * Name: qla_init_ifnet
858 * Function: Creates the network device interface and registers it with the OS
859 */
860
861 static void
862 qla_init_ifnet(device_t dev, qla_host_t *ha)
863 {
864 struct ifnet *ifp;
865
866 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
867
868 ifp = ha->ifp = if_alloc(IFT_ETHER);
869
870 if (ifp == NULL)
871 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
872
873 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
874
875 ifp->if_baudrate = IF_Gbps(10);
876 ifp->if_capabilities = IFCAP_LINKSTATE;
877 ifp->if_mtu = ETHERMTU;
878
879 ifp->if_init = qla_init;
880 ifp->if_softc = ha;
881 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
882 ifp->if_ioctl = qla_ioctl;
883
884 ifp->if_transmit = qla_transmit;
885 ifp->if_qflush = qla_qflush;
886
887 IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
888 ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
889 IFQ_SET_READY(&ifp->if_snd);
890
891 ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
892
893 ether_ifattach(ifp, qla_get_mac_addr(ha));
894
895 ifp->if_capabilities |= IFCAP_HWCSUM |
896 IFCAP_TSO4 |
897 IFCAP_TSO6 |
898 IFCAP_JUMBO_MTU |
899 IFCAP_VLAN_HWTAGGING |
900 IFCAP_VLAN_MTU |
901 IFCAP_VLAN_HWTSO |
902 IFCAP_LRO;
903
904 ifp->if_capenable = ifp->if_capabilities;
905
906 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
907
908 ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);
909
910 ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
911 NULL);
912 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
913
914 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
915
916 QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));
917
918 return;
919 }
920
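/*
 * Name: qla_init_locked
 * Function: Stops the port and brings it back up: allocates transmit and
 *	receive buffers and (re)initializes the hardware interface.
 *	Called with the QLA lock held.
 */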
921 static void
922 qla_init_locked(qla_host_t *ha)
923 {
924 struct ifnet *ifp = ha->ifp;
925
926 ql_sp_log(ha, 14, 0, 0, 0, 0, 0, 0);
927
928 qla_stop(ha);
929
930 if (qla_alloc_xmt_bufs(ha) != 0)
931 return;
932
933 qla_confirm_9kb_enable(ha);
934
935 if (qla_alloc_rcv_bufs(ha) != 0)
936 return;
937
938 bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);
939
940 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
941 ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
942
943 ha->stop_rcv = 0;
944 if (ql_init_hw_if(ha) == 0) {
945 ifp = ha->ifp;
946 ifp->if_drv_flags |= IFF_DRV_RUNNING;
947 ha->hw_vlan_tx_frames = 0;
948 ha->tx_tso_frames = 0;
949 ha->qla_interface_up = 1;
950 ql_update_link_state(ha);
951 } else {
952 if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_IF_START_FAILURE)
953 ha->hw.sp_log_stop = -1;
954 }
955
956 ha->qla_watchdog_pause = 0;
957
958 return;
959 }
960
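/*
 * Name: qla_init
 * Function: if_init entry point; acquires the QLA lock and calls
 *	qla_init_locked()
 */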
961 static void
962 qla_init(void *arg)
963 {
964 qla_host_t *ha;
965
966 ha = (qla_host_t *)arg;
967
968 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
969
970 if (QLA_LOCK(ha, __func__, -1, 0) != 0)
971 return;
972
973 qla_init_locked(ha);
974
975 QLA_UNLOCK(ha, __func__);
976
977 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
978 }
979
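/*
 * Name: qla_set_multi
 * Function: Collects up to Q8_MAX_NUM_MULTICAST_ADDRS link level multicast
 *	addresses from the interface and programs them into the hardware;
 *	when add_multi is zero all existing entries are deleted first
 */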
980 static int
981 qla_set_multi(qla_host_t *ha, uint32_t add_multi)
982 {
983 uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
984 struct ifmultiaddr *ifma;
985 int mcnt = 0;
986 struct ifnet *ifp = ha->ifp;
987 int ret = 0;
988
989 if_maddr_rlock(ifp);
990
991 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
992
993 if (ifma->ifma_addr->sa_family != AF_LINK)
994 continue;
995
996 if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
997 break;
998
999 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1000 &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);
1001
1002 mcnt++;
1003 }
1004
1005 if_maddr_runlock(ifp);
1006
1007 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
1008 QLA_LOCK_NO_SLEEP) != 0)
1009 return (-1);
1010
1011 ql_sp_log(ha, 12, 4, ifp->if_drv_flags,
1012 (ifp->if_drv_flags & IFF_DRV_RUNNING),
1013 add_multi, (uint32_t)mcnt, 0);
1014
1015 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1016
1017 if (!add_multi) {
1018 ret = qla_hw_del_all_mcast(ha);
1019
1020 if (ret)
1021 device_printf(ha->pci_dev,
1022 "%s: qla_hw_del_all_mcast() failed\n",
1023 __func__);
1024 }
1025
1026 if (!ret)
1027 ret = ql_hw_set_multi(ha, mta, mcnt, 1);
1028
1029 }
1030
1031 QLA_UNLOCK(ha, __func__);
1032
1033 return (ret);
1034 }
1035
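/*
 * Name: qla_ioctl
 * Function: ifnet ioctl handler (interface address, MTU, flags,
 *	multicast lists, media and capabilities)
 */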
1036 static int
1037 qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1038 {
1039 int ret = 0;
1040 struct ifreq *ifr = (struct ifreq *)data;
1041 struct ifaddr *ifa = (struct ifaddr *)data;
1042 qla_host_t *ha;
1043
1044 ha = (qla_host_t *)ifp->if_softc;
1045 if (ha->offline || ha->qla_initiate_recovery)
1046 return (ret);
1047
1048 switch (cmd) {
1049 case SIOCSIFADDR:
1050 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
1051 __func__, cmd));
1052
1053 if (ifa->ifa_addr->sa_family == AF_INET) {
1054
1055 ret = QLA_LOCK(ha, __func__,
1056 QLA_LOCK_DEFAULT_MS_TIMEOUT,
1057 QLA_LOCK_NO_SLEEP);
1058 if (ret)
1059 break;
1060
1061 ifp->if_flags |= IFF_UP;
1062
1063 ql_sp_log(ha, 8, 3, ifp->if_drv_flags,
1064 (ifp->if_drv_flags & IFF_DRV_RUNNING),
1065 ntohl(IA_SIN(ifa)->sin_addr.s_addr), 0, 0);
1066
1067 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1068 qla_init_locked(ha);
1069 }
1070
1071 QLA_UNLOCK(ha, __func__);
1072 QL_DPRINT4(ha, (ha->pci_dev,
1073 "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
1074 __func__, cmd,
1075 ntohl(IA_SIN(ifa)->sin_addr.s_addr)));
1076
1077 arp_ifinit(ifp, ifa);
1078 } else {
1079 ether_ioctl(ifp, cmd, data);
1080 }
1081 break;
1082
1083 case SIOCSIFMTU:
1084 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
1085 __func__, cmd));
1086
1087 if (ifr->ifr_mtu > QLA_MAX_MTU) {
1088 ret = EINVAL;
1089 } else {
1090 ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
1091 QLA_LOCK_NO_SLEEP);
1092
1093 if (ret)
1094 break;
1095
1096 ifp->if_mtu = ifr->ifr_mtu;
1097 ha->max_frame_size =
1098 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1099
1100 ql_sp_log(ha, 9, 4, ifp->if_drv_flags,
1101 (ifp->if_drv_flags & IFF_DRV_RUNNING),
1102 ha->max_frame_size, ifp->if_mtu, 0);
1103
1104 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1105 qla_init_locked(ha);
1106 }
1107
1108 if (ifp->if_mtu > ETHERMTU)
1109 ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
1110 else
1111 ha->std_replenish = QL_STD_REPLENISH_THRES;
1112
1113
1114 QLA_UNLOCK(ha, __func__);
1115 }
1116
1117 break;
1118
1119 case SIOCSIFFLAGS:
1120 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
1121 __func__, cmd));
1122
1123 ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
1124 QLA_LOCK_NO_SLEEP);
1125
1126 if (ret)
1127 break;
1128
1129 ql_sp_log(ha, 10, 4, ifp->if_drv_flags,
1130 (ifp->if_drv_flags & IFF_DRV_RUNNING),
1131 ha->if_flags, ifp->if_flags, 0);
1132
1133 if (ifp->if_flags & IFF_UP) {
1134
1135 ha->max_frame_size = ifp->if_mtu +
1136 ETHER_HDR_LEN + ETHER_CRC_LEN;
1137 qla_init_locked(ha);
1138
1139 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1140 if ((ifp->if_flags ^ ha->if_flags) &
1141 IFF_PROMISC) {
1142 ret = ql_set_promisc(ha);
1143 } else if ((ifp->if_flags ^ ha->if_flags) &
1144 IFF_ALLMULTI) {
1145 ret = ql_set_allmulti(ha);
1146 }
1147 }
1148 } else {
1149 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1150 qla_stop(ha);
1151 ha->if_flags = ifp->if_flags;
1152 }
1153
1154 QLA_UNLOCK(ha, __func__);
1155 break;
1156
1157 case SIOCADDMULTI:
1158 QL_DPRINT4(ha, (ha->pci_dev,
1159 "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));
1160
1161 if (qla_set_multi(ha, 1))
1162 ret = EINVAL;
1163 break;
1164
1165 case SIOCDELMULTI:
1166 QL_DPRINT4(ha, (ha->pci_dev,
1167 "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));
1168
1169 if (qla_set_multi(ha, 0))
1170 ret = EINVAL;
1171 break;
1172
1173 case SIOCSIFMEDIA:
1174 case SIOCGIFMEDIA:
1175 QL_DPRINT4(ha, (ha->pci_dev,
1176 "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
1177 __func__, cmd));
1178 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
1179 break;
1180
1181 case SIOCSIFCAP:
1182 {
1183 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1184
1185 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
1186 __func__, cmd));
1187
1188 if (mask & IFCAP_HWCSUM)
1189 ifp->if_capenable ^= IFCAP_HWCSUM;
1190 if (mask & IFCAP_TSO4)
1191 ifp->if_capenable ^= IFCAP_TSO4;
1192 if (mask & IFCAP_TSO6)
1193 ifp->if_capenable ^= IFCAP_TSO6;
1194 if (mask & IFCAP_VLAN_HWTAGGING)
1195 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1196 if (mask & IFCAP_VLAN_HWTSO)
1197 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1198 if (mask & IFCAP_LRO)
1199 ifp->if_capenable ^= IFCAP_LRO;
1200
1201 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1202 ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
1203 QLA_LOCK_NO_SLEEP);
1204
1205 if (ret)
1206 break;
1207
1208 ql_sp_log(ha, 11, 4, ifp->if_drv_flags,
1209 (ifp->if_drv_flags & IFF_DRV_RUNNING),
1210 mask, ifp->if_capenable, 0);
1211
1212 qla_init_locked(ha);
1213
1214 QLA_UNLOCK(ha, __func__);
1215
1216 }
1217 VLAN_CAPABILITIES(ifp);
1218 break;
1219 }
1220
1221 default:
1222 QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
1223 __func__, cmd));
1224 ret = ether_ioctl(ifp, cmd, data);
1225 break;
1226 }
1227
1228 return (ret);
1229 }
1230
1231 static int
1232 qla_media_change(struct ifnet *ifp)
1233 {
1234 qla_host_t *ha;
1235 struct ifmedia *ifm;
1236 int ret = 0;
1237
1238 ha = (qla_host_t *)ifp->if_softc;
1239
1240 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1241
1242 ifm = &ha->media;
1243
1244 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1245 ret = EINVAL;
1246
1247 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1248
1249 return (ret);
1250 }
1251
1252 static void
1253 qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1254 {
1255 qla_host_t *ha;
1256
1257 ha = (qla_host_t *)ifp->if_softc;
1258
1259 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1260
1261 ifmr->ifm_status = IFM_AVALID;
1262 ifmr->ifm_active = IFM_ETHER;
1263
1264 ql_update_link_state(ha);
1265 if (ha->hw.link_up) {
1266 ifmr->ifm_status |= IFM_ACTIVE;
1267 ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
1268 }
1269
1270 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,\
1271 (ha->hw.link_up ? "link_up" : "link_down")));
1272
1273 return;
1274 }
1275
1276
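/*
 * Name: qla_send
 * Function: DMA maps an mbuf chain (defragmenting it once if it has too
 *	many segments) and hands it to the hardware transmit ring txr_idx
 */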
1277 static int
1278 qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
1279 uint32_t iscsi_pdu)
1280 {
1281 bus_dma_segment_t segs[QLA_MAX_SEGMENTS];
1282 bus_dmamap_t map;
1283 int nsegs;
1284 int ret = -1;
1285 uint32_t tx_idx;
1286 struct mbuf *m_head = *m_headp;
1287
1288 QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));
1289
1290 tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;
1291
1292 if ((NULL != ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head) ||
1293 (QL_ERR_INJECT(ha, INJCT_TXBUF_MBUF_NON_NULL))){
1294 QL_ASSERT(ha, 0, ("%s [%d]: txr_idx = %d tx_idx = %d "\
1295 "mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,\
1296 ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head));
1297
1298 device_printf(ha->pci_dev, "%s [%d]: txr_idx = %d tx_idx = %d "
1299 "mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,
1300 ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head);
1301
1302 if (m_head)
1303 m_freem(m_head);
1304 *m_headp = NULL;
1305 QL_INITIATE_RECOVERY(ha);
1306 return (ret);
1307 }
1308
1309 map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;
1310
1311 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
1312 BUS_DMA_NOWAIT);
1313
1314 if (ret == EFBIG) {
1315
1316 struct mbuf *m;
1317
1318 QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
1319 m_head->m_pkthdr.len));
1320
1321 m = m_defrag(m_head, M_NOWAIT);
1322 if (m == NULL) {
1323 ha->err_tx_defrag++;
1324 m_freem(m_head);
1325 *m_headp = NULL;
1326 device_printf(ha->pci_dev,
1327 "%s: m_defrag() = NULL [%d]\n",
1328 __func__, ret);
1329 return (ENOBUFS);
1330 }
1331 m_head = m;
1332 *m_headp = m_head;
1333
1334 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
1335 segs, &nsegs, BUS_DMA_NOWAIT))) {
1336
1337 ha->err_tx_dmamap_load++;
1338
1339 device_printf(ha->pci_dev,
1340 "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
1341 __func__, ret, m_head->m_pkthdr.len);
1342
1343 if (ret != ENOMEM) {
1344 m_freem(m_head);
1345 *m_headp = NULL;
1346 }
1347 return (ret);
1348 }
1349
1350 } else if (ret) {
1351
1352 ha->err_tx_dmamap_load++;
1353
1354 device_printf(ha->pci_dev,
1355 "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
1356 __func__, ret, m_head->m_pkthdr.len);
1357
1358 if (ret != ENOMEM) {
1359 m_freem(m_head);
1360 *m_headp = NULL;
1361 }
1362 return (ret);
1363 }
1364
1365 QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet"));
1366
1367 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
1368
1369 if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx,
1370 iscsi_pdu))) {
1371 ha->tx_ring[txr_idx].count++;
1372 if (iscsi_pdu)
1373 ha->tx_ring[txr_idx].iscsi_pkt_count++;
1374 ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
1375 } else {
1376 bus_dmamap_unload(ha->tx_tag, map);
1377 if (ret == EINVAL) {
1378 if (m_head)
1379 m_freem(m_head);
1380 *m_headp = NULL;
1381 }
1382 }
1383
1384 QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
1385 return (ret);
1386 }
1387
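/*
 * Name: qla_alloc_tx_br
 * Function: Allocates the buf_ring and mutex used by a transmit fastpath
 */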
1388 static int
1389 qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
1390 {
1391 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
1392 "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx);
1393
1394 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
1395
1396 fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF,
1397 M_NOWAIT, &fp->tx_mtx);
1398 if (fp->tx_br == NULL) {
1399 QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for "
1400 " fp[%d, %d]\n", ha->pci_func, fp->txr_idx));
1401 return (-ENOMEM);
1402 }
1403 return 0;
1404 }
1405
1406 static void
1407 qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
1408 {
1409 struct mbuf *mp;
1410 struct ifnet *ifp = ha->ifp;
1411
1412 if (mtx_initialized(&fp->tx_mtx)) {
1413
1414 if (fp->tx_br != NULL) {
1415
1416 mtx_lock(&fp->tx_mtx);
1417
1418 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
1419 m_freem(mp);
1420 }
1421
1422 mtx_unlock(&fp->tx_mtx);
1423
1424 buf_ring_free(fp->tx_br, M_DEVBUF);
1425 fp->tx_br = NULL;
1426 }
1427 mtx_destroy(&fp->tx_mtx);
1428 }
1429 return;
1430 }
1431
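/*
 * Name: qla_fp_taskqueue
 * Function: Fastpath task; processes receive completions, reaps completed
 *	transmits and drains the per-queue buf_ring via qla_send().
 *	Re-enqueues itself if work is left over, otherwise re-enables
 *	the queue's interrupt.
 */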
1432 static void
1433 qla_fp_taskqueue(void *context, int pending)
1434 {
1435 qla_tx_fp_t *fp;
1436 qla_host_t *ha;
1437 struct ifnet *ifp;
1438 struct mbuf *mp = NULL;
1439 int ret = 0;
1440 uint32_t txr_idx;
1441 uint32_t iscsi_pdu = 0;
1442 uint32_t rx_pkts_left = -1;
1443
1444 fp = context;
1445
1446 if (fp == NULL)
1447 return;
1448
1449 ha = (qla_host_t *)fp->ha;
1450
1451 ifp = ha->ifp;
1452
1453 txr_idx = fp->txr_idx;
1454
1455 mtx_lock(&fp->tx_mtx);
1456
1457 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) || (!ha->hw.link_up)) {
1458 mtx_unlock(&fp->tx_mtx);
1459 goto qla_fp_taskqueue_exit;
1460 }
1461
1462 while (rx_pkts_left && !ha->stop_rcv &&
1463 (ifp->if_drv_flags & IFF_DRV_RUNNING) && ha->hw.link_up) {
1464 rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64);
1465
1466 #ifdef QL_ENABLE_ISCSI_TLV
1467 ql_hw_tx_done_locked(ha, fp->txr_idx);
1468 ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1)));
1469 #else
1470 ql_hw_tx_done_locked(ha, fp->txr_idx);
1471 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
1472
1473 mp = drbr_peek(ifp, fp->tx_br);
1474
1475 while (mp != NULL) {
1476
1477 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) {
1478 #ifdef QL_ENABLE_ISCSI_TLV
1479 if (ql_iscsi_pdu(ha, mp) == 0) {
1480 txr_idx = txr_idx +
1481 (ha->hw.num_tx_rings >> 1);
1482 iscsi_pdu = 1;
1483 } else {
1484 iscsi_pdu = 0;
1485 txr_idx = fp->txr_idx;
1486 }
1487 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
1488 }
1489
1490 ret = qla_send(ha, &mp, txr_idx, iscsi_pdu);
1491
1492 if (ret) {
1493 if (mp != NULL)
1494 drbr_putback(ifp, fp->tx_br, mp);
1495 else {
1496 drbr_advance(ifp, fp->tx_br);
1497 }
1498
1499 mtx_unlock(&fp->tx_mtx);
1500
1501 goto qla_fp_taskqueue_exit0;
1502 } else {
1503 drbr_advance(ifp, fp->tx_br);
1504 }
1505
1506 /* Send a copy of the frame to the BPF listener */
1507 ETHER_BPF_MTAP(ifp, mp);
1508
1509 if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
1510 (!ha->hw.link_up))
1511 break;
1512
1513 mp = drbr_peek(ifp, fp->tx_br);
1514 }
1515 }
1516 mtx_unlock(&fp->tx_mtx);
1517
1518 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1519 goto qla_fp_taskqueue_exit;
1520
1521 qla_fp_taskqueue_exit0:
1522
1523 if (rx_pkts_left || ((mp != NULL) && ret)) {
1524 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
1525 } else {
1526 if (!ha->stop_rcv) {
1527 QL_ENABLE_INTERRUPTS(ha, fp->txr_idx);
1528 }
1529 }
1530
1531 qla_fp_taskqueue_exit:
1532
1533 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
1534 return;
1535 }
1536
1537 static int
1538 qla_create_fp_taskqueues(qla_host_t *ha)
1539 {
1540 int i;
1541 uint8_t tq_name[32];
1542
1543 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1544
1545 qla_tx_fp_t *fp = &ha->tx_fp[i];
1546
1547 bzero(tq_name, sizeof (tq_name));
1548 snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
1549
1550 TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);
1551
1552 fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
1553 taskqueue_thread_enqueue,
1554 &fp->fp_taskqueue);
1555
1556 if (fp->fp_taskqueue == NULL)
1557 return (-1);
1558
1559 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
1560 tq_name);
1561
1562 QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
1563 fp->fp_taskqueue));
1564 }
1565
1566 return (0);
1567 }
1568
1569 static void
1570 qla_destroy_fp_taskqueues(qla_host_t *ha)
1571 {
1572 int i;
1573
1574 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1575
1576 qla_tx_fp_t *fp = &ha->tx_fp[i];
1577
1578 if (fp->fp_taskqueue != NULL) {
1579 taskqueue_drain_all(fp->fp_taskqueue);
1580 taskqueue_free(fp->fp_taskqueue);
1581 fp->fp_taskqueue = NULL;
1582 }
1583 }
1584 return;
1585 }
1586
1587 static void
1588 qla_drain_fp_taskqueues(qla_host_t *ha)
1589 {
1590 int i;
1591
1592 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1593 qla_tx_fp_t *fp = &ha->tx_fp[i];
1594
1595 if (fp->fp_taskqueue != NULL) {
1596 taskqueue_drain_all(fp->fp_taskqueue);
1597 }
1598 }
1599 return;
1600 }
1601
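/*
 * Name: qla_transmit
 * Function: if_transmit entry point; selects a fastpath from the mbuf's
 *	flowid, enqueues the mbuf on its buf_ring and schedules the
 *	fastpath taskqueue
 */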
1602 static int
1603 qla_transmit(struct ifnet *ifp, struct mbuf *mp)
1604 {
1605 qla_host_t *ha = (qla_host_t *)ifp->if_softc;
1606 qla_tx_fp_t *fp;
1607 int rss_id = 0;
1608 int ret = 0;
1609
1610 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1611
1612 #if __FreeBSD_version >= 1100000
1613 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
1614 #else
1615 if (mp->m_flags & M_FLOWID)
1616 #endif
1617 rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) %
1618 ha->hw.num_sds_rings;
1619 fp = &ha->tx_fp[rss_id];
1620
1621 if (fp->tx_br == NULL) {
1622 ret = EINVAL;
1623 goto qla_transmit_exit;
1624 }
1625
1626 if (mp != NULL) {
1627 ret = drbr_enqueue(ifp, fp->tx_br, mp);
1628 }
1629
1630 if (fp->fp_taskqueue != NULL)
1631 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
1632
1633 ret = 0;
1634
1635 qla_transmit_exit:
1636
1637 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
1638 return ret;
1639 }
1640
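/*
 * Name: qla_qflush
 * Function: if_qflush entry point; frees all mbufs queued on the
 *	per-queue buf_rings
 */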
1641 static void
1642 qla_qflush(struct ifnet *ifp)
1643 {
1644 int i;
1645 qla_tx_fp_t *fp;
1646 struct mbuf *mp;
1647 qla_host_t *ha;
1648
1649 ha = (qla_host_t *)ifp->if_softc;
1650
1651 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1652
1653 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1654
1655 fp = &ha->tx_fp[i];
1656
1657 if (fp == NULL)
1658 continue;
1659
1660 if (fp->tx_br) {
1661 mtx_lock(&fp->tx_mtx);
1662
1663 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
1664 m_freem(mp);
1665 }
1666 mtx_unlock(&fp->tx_mtx);
1667 }
1668 }
1669 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1670
1671 return;
1672 }
1673
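/*
 * Name: qla_stop
 * Function: Marks the interface down, pauses the watchdog, drains the
 *	fastpath taskqueues, tears down the hardware interface and frees
 *	the transmit/receive buffers
 */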
1674 static void
1675 qla_stop(qla_host_t *ha)
1676 {
1677 struct ifnet *ifp = ha->ifp;
1678 device_t dev;
1679 int i = 0;
1680
1681 ql_sp_log(ha, 13, 0, 0, 0, 0, 0, 0);
1682
1683 dev = ha->pci_dev;
1684
1685 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1686 ha->qla_watchdog_pause = 1;
1687
1688 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1689 qla_tx_fp_t *fp;
1690
1691 fp = &ha->tx_fp[i];
1692
1693 if (fp == NULL)
1694 continue;
1695
1696 if (fp->tx_br != NULL) {
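			/* lock/unlock to wait for any transmit in progress on this queue */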
1697 mtx_lock(&fp->tx_mtx);
1698 mtx_unlock(&fp->tx_mtx);
1699 }
1700 }
1701
1702 while (!ha->qla_watchdog_paused)
1703 qla_mdelay(__func__, 1);
1704
1705 ha->qla_interface_up = 0;
1706
1707 qla_drain_fp_taskqueues(ha);
1708
1709 ql_del_hw_if(ha);
1710
1711 qla_free_xmt_bufs(ha);
1712 qla_free_rcv_bufs(ha);
1713
1714 return;
1715 }
1716
1717 /*
1718 * Buffer Management Functions for Transmit and Receive Rings
1719 */
1720 static int
1721 qla_alloc_xmt_bufs(qla_host_t *ha)
1722 {
1723 int ret = 0;
1724 uint32_t i, j;
1725 qla_tx_buf_t *txb;
1726
1727 if (bus_dma_tag_create(NULL, /* parent */
1728 1, 0, /* alignment, bounds */
1729 BUS_SPACE_MAXADDR, /* lowaddr */
1730 BUS_SPACE_MAXADDR, /* highaddr */
1731 NULL, NULL, /* filter, filterarg */
1732 QLA_MAX_TSO_FRAME_SIZE, /* maxsize */
1733 QLA_MAX_SEGMENTS, /* nsegments */
1734 PAGE_SIZE, /* maxsegsize */
1735 BUS_DMA_ALLOCNOW, /* flags */
1736 NULL, /* lockfunc */
1737 NULL, /* lockfuncarg */
1738 &ha->tx_tag)) {
1739 device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
1740 __func__);
1741 return (ENOMEM);
1742 }
1743
1744 for (i = 0; i < ha->hw.num_tx_rings; i++) {
1745 bzero((void *)ha->tx_ring[i].tx_buf,
1746 (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1747 }
1748
1749 for (j = 0; j < ha->hw.num_tx_rings; j++) {
1750 for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
1751
1752 txb = &ha->tx_ring[j].tx_buf[i];
1753
1754 if ((ret = bus_dmamap_create(ha->tx_tag,
1755 BUS_DMA_NOWAIT, &txb->map))) {
1756
1757 ha->err_tx_dmamap_create++;
1758 device_printf(ha->pci_dev,
1759 "%s: bus_dmamap_create failed[%d]\n",
1760 __func__, ret);
1761
1762 qla_free_xmt_bufs(ha);
1763
1764 return (ret);
1765 }
1766 }
1767 }
1768
1769 return 0;
1770 }
1771
1772 /*
1773 * Release the mbuf after it has been sent on the wire
1774 */
1775 static void
1776 qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
1777 {
1778 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1779
1780 if (txb->m_head) {
1781 bus_dmamap_sync(ha->tx_tag, txb->map,
1782 BUS_DMASYNC_POSTWRITE);
1783
1784 bus_dmamap_unload(ha->tx_tag, txb->map);
1785
1786 m_freem(txb->m_head);
1787 txb->m_head = NULL;
1788
1789 bus_dmamap_destroy(ha->tx_tag, txb->map);
1790 txb->map = NULL;
1791 }
1792
1793 if (txb->map) {
1794 bus_dmamap_unload(ha->tx_tag, txb->map);
1795 bus_dmamap_destroy(ha->tx_tag, txb->map);
1796 txb->map = NULL;
1797 }
1798
1799 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1800 }
1801
1802 static void
1803 qla_free_xmt_bufs(qla_host_t *ha)
1804 {
1805 int i, j;
1806
1807 for (j = 0; j < ha->hw.num_tx_rings; j++) {
1808 for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
1809 qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
1810 }
1811
1812 if (ha->tx_tag != NULL) {
1813 bus_dma_tag_destroy(ha->tx_tag);
1814 ha->tx_tag = NULL;
1815 }
1816
1817 for (i = 0; i < ha->hw.num_tx_rings; i++) {
1818 bzero((void *)ha->tx_ring[i].tx_buf,
1819 (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1820 }
1821 return;
1822 }
1823
1824
1825 static int
1826 qla_alloc_rcv_std(qla_host_t *ha)
1827 {
1828 int i, j, k, r, ret = 0;
1829 qla_rx_buf_t *rxb;
1830 qla_rx_ring_t *rx_ring;
1831
1832 for (r = 0; r < ha->hw.num_rds_rings; r++) {
1833
1834 rx_ring = &ha->rx_ring[r];
1835
1836 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1837
1838 rxb = &rx_ring->rx_buf[i];
1839
1840 ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
1841 &rxb->map);
1842
1843 if (ret) {
1844 device_printf(ha->pci_dev,
1845 "%s: dmamap[%d, %d] failed\n",
1846 __func__, r, i);
1847
1848 for (k = 0; k < r; k++) {
1849 for (j = 0; j < NUM_RX_DESCRIPTORS;
1850 j++) {
1851 rxb = &ha->rx_ring[k].rx_buf[j];
1852 bus_dmamap_destroy(ha->rx_tag,
1853 rxb->map);
1854 }
1855 }
1856
1857 for (j = 0; j < i; j++) {
1858 bus_dmamap_destroy(ha->rx_tag,
1859 rx_ring->rx_buf[j].map);
1860 }
1861 goto qla_alloc_rcv_std_err;
1862 }
1863 }
1864 }
1865
1866 qla_init_hw_rcv_descriptors(ha);
1867
1868
1869 for (r = 0; r < ha->hw.num_rds_rings; r++) {
1870
1871 rx_ring = &ha->rx_ring[r];
1872
1873 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1874 rxb = &rx_ring->rx_buf[i];
1875 rxb->handle = i;
1876 if (!(ret = ql_get_mbuf(ha, rxb, NULL))) {
1877 /*
1878 * set the physical address in the
1879 * corresponding descriptor entry in the
1880 * receive ring/queue for the hba
1881 */
1882 qla_set_hw_rcv_desc(ha, r, i, rxb->handle,
1883 rxb->paddr,
1884 (rxb->m_head)->m_pkthdr.len);
1885 } else {
1886 device_printf(ha->pci_dev,
1887 "%s: ql_get_mbuf [%d, %d] failed\n",
1888 __func__, r, i);
1889 bus_dmamap_destroy(ha->rx_tag, rxb->map);
1890 goto qla_alloc_rcv_std_err;
1891 }
1892 }
1893 }
1894 return 0;
1895
1896 qla_alloc_rcv_std_err:
1897 return (-1);
1898 }
1899
1900 static void
1901 qla_free_rcv_std(qla_host_t *ha)
1902 {
1903 int i, r;
1904 qla_rx_buf_t *rxb;
1905
1906 for (r = 0; r < ha->hw.num_rds_rings; r++) {
1907 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1908 rxb = &ha->rx_ring[r].rx_buf[i];
1909 if (rxb->m_head != NULL) {
1910 bus_dmamap_unload(ha->rx_tag, rxb->map);
1911 bus_dmamap_destroy(ha->rx_tag, rxb->map);
1912 m_freem(rxb->m_head);
1913 rxb->m_head = NULL;
1914 }
1915 }
1916 }
1917 return;
1918 }
1919
1920 static int
1921 qla_alloc_rcv_bufs(qla_host_t *ha)
1922 {
1923 int i, ret = 0;
1924
1925 if (bus_dma_tag_create(NULL, /* parent */
1926 1, 0, /* alignment, bounds */
1927 BUS_SPACE_MAXADDR, /* lowaddr */
1928 BUS_SPACE_MAXADDR, /* highaddr */
1929 NULL, NULL, /* filter, filterarg */
1930 MJUM9BYTES, /* maxsize */
1931 1, /* nsegments */
1932 MJUM9BYTES, /* maxsegsize */
1933 BUS_DMA_ALLOCNOW, /* flags */
1934 NULL, /* lockfunc */
1935 NULL, /* lockfuncarg */
1936 &ha->rx_tag)) {
1937
1938 device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
1939 __func__);
1940
1941 return (ENOMEM);
1942 }
1943
1944 bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
1945
1946 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1947 ha->hw.sds[i].sdsr_next = 0;
1948 ha->hw.sds[i].rxb_free = NULL;
1949 ha->hw.sds[i].rx_free = 0;
1950 }
1951
1952 ret = qla_alloc_rcv_std(ha);
1953
1954 return (ret);
1955 }
1956
1957 static void
1958 qla_free_rcv_bufs(qla_host_t *ha)
1959 {
1960 int i;
1961
1962 qla_free_rcv_std(ha);
1963
1964 if (ha->rx_tag != NULL) {
1965 bus_dma_tag_destroy(ha->rx_tag);
1966 ha->rx_tag = NULL;
1967 }
1968
1969 bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
1970
1971 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1972 ha->hw.sds[i].sdsr_next = 0;
1973 ha->hw.sds[i].rxb_free = NULL;
1974 ha->hw.sds[i].rx_free = 0;
1975 }
1976
1977 return;
1978 }
1979
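/*
 * Name: ql_get_mbuf
 * Function: Allocates (or reuses) a receive mbuf cluster, aligns its data
 *	area to an 8 byte boundary and DMA maps it for the receive ring
 */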
1980 int
1981 ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
1982 {
1983 register struct mbuf *mp = nmp;
1984 struct ifnet *ifp;
1985 int ret = 0;
1986 uint32_t offset;
1987 bus_dma_segment_t segs[1];
1988 int nsegs, mbuf_size;
1989
1990 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1991
1992 ifp = ha->ifp;
1993
1994 if (ha->hw.enable_9kb)
1995 mbuf_size = MJUM9BYTES;
1996 else
1997 mbuf_size = MCLBYTES;
1998
1999 if (mp == NULL) {
2000
2001 if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE))
2002 return(-1);
2003
2004 if (ha->hw.enable_9kb)
2005 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size);
2006 else
2007 mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2008
2009 if (mp == NULL) {
2010 ha->err_m_getcl++;
2011 ret = ENOBUFS;
2012 device_printf(ha->pci_dev,
2013 "%s: m_getcl failed\n", __func__);
2014 goto exit_ql_get_mbuf;
2015 }
2016 mp->m_len = mp->m_pkthdr.len = mbuf_size;
2017 } else {
2018 mp->m_len = mp->m_pkthdr.len = mbuf_size;
2019 mp->m_data = mp->m_ext.ext_buf;
2020 mp->m_next = NULL;
2021 }
2022
2023 offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
2024 if (offset) {
2025 offset = 8 - offset;
2026 m_adj(mp, offset);
2027 }
2028
2029 /*
2030 * Using memory from the mbuf cluster pool, invoke the bus_dma
2031 * machinery to arrange the memory mapping.
2032 */
2033 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
2034 mp, segs, &nsegs, BUS_DMA_NOWAIT);
2035 rxb->paddr = segs[0].ds_addr;
2036
2037 if (ret || !rxb->paddr || (nsegs != 1)) {
2038 m_free(mp);
2039 rxb->m_head = NULL;
2040 device_printf(ha->pci_dev,
2041 "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
2042 __func__, ret, (long long unsigned int)rxb->paddr,
2043 nsegs);
2044 ret = -1;
2045 goto exit_ql_get_mbuf;
2046 }
2047 rxb->m_head = mp;
2048 bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);
2049
2050 exit_ql_get_mbuf:
2051 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
2052 return (ret);
2053 }
2054
2055
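/*
 * Name: qla_get_peer
 * Function: Locates the sibling PCI function (same slot and device id) so
 *	that the two ports can coordinate error recovery
 */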
2056 static void
2057 qla_get_peer(qla_host_t *ha)
2058 {
2059 device_t *peers;
2060 int count, i, slot;
2061 int my_slot = pci_get_slot(ha->pci_dev);
2062
2063 if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count))
2064 return;
2065
2066 for (i = 0; i < count; i++) {
2067 slot = pci_get_slot(peers[i]);
2068
2069 if ((slot >= 0) && (slot == my_slot) &&
2070 (pci_get_device(peers[i]) ==
2071 pci_get_device(ha->pci_dev))) {
2072 if (ha->pci_dev != peers[i])
2073 ha->peer_dev = peers[i];
2074 }
2075 }
2076 }
2077
2078 static void
2079 qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
2080 {
2081 qla_host_t *ha_peer;
2082
2083 if (ha->peer_dev) {
2084 if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) {
2085
2086 ha_peer->msg_from_peer = msg_to_peer;
2087 }
2088 }
2089 }
2090
2091 void
2092 qla_set_error_recovery(qla_host_t *ha)
2093 {
2094 struct ifnet *ifp = ha->ifp;
2095
2096 if (!cold && ha->enable_error_recovery) {
2097 if (ifp)
2098 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2099 ha->qla_initiate_recovery = 1;
2100 } else
2101 ha->offline = 1;
2102 return;
2103 }
2104
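/*
 * Name: qla_error_recovery
 * Function: Task that recovers from a fatal adapter error: quiesces the
 *	interface, synchronizes the reset with the peer function, optionally
 *	captures a minidump and driver state, reinitializes the hardware and
 *	brings the interface back up (the port is marked offline on failure)
 */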
2105 static void
2106 qla_error_recovery(void *context, int pending)
2107 {
2108 qla_host_t *ha = context;
2109 uint32_t msecs_100 = 400;
2110 struct ifnet *ifp = ha->ifp;
2111 int i = 0;
2112
2113 device_printf(ha->pci_dev, "%s: enter\n", __func__);
2114 ha->hw.imd_compl = 1;
2115
2116 taskqueue_drain_all(ha->stats_tq);
2117 taskqueue_drain_all(ha->async_event_tq);
2118
2119 if (QLA_LOCK(ha, __func__, -1, 0) != 0)
2120 return;
2121
2122 device_printf(ha->pci_dev, "%s: ts_usecs = %ld start\n",
2123 __func__, qla_get_usec_timestamp());
2124
2125 if (ha->qla_interface_up) {
2126
2127 qla_mdelay(__func__, 300);
2128
2129 //ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2130
2131 for (i = 0; i < ha->hw.num_sds_rings; i++) {
2132 qla_tx_fp_t *fp;
2133
2134 fp = &ha->tx_fp[i];
2135
2136 if (fp == NULL)
2137 continue;
2138
2139 if (fp->tx_br != NULL) {
2140 mtx_lock(&fp->tx_mtx);
2141 mtx_unlock(&fp->tx_mtx);
2142 }
2143 }
2144 }
2145
2146 qla_drain_fp_taskqueues(ha);
2147
2148 if ((ha->pci_func & 0x1) == 0) {
2149
2150 if (!ha->msg_from_peer) {
2151 qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
2152
2153 while ((ha->msg_from_peer != QL_PEER_MSG_ACK) &&
2154 msecs_100--)
2155 qla_mdelay(__func__, 100);
2156 }
2157
2158 ha->msg_from_peer = 0;
2159
2160 if (ha->enable_minidump)
2161 ql_minidump(ha);
2162
2163 if (ha->enable_driverstate_dump)
2164 ql_capture_drvr_state(ha);
2165
2166 if (ql_init_hw(ha)) {
2167 device_printf(ha->pci_dev,
2168 "%s: ts_usecs = %ld exit: ql_init_hw failed\n",
2169 __func__, qla_get_usec_timestamp());
2170 ha->offline = 1;
2171 goto qla_error_recovery_exit;
2172 }
2173
2174 if (ha->qla_interface_up) {
2175 qla_free_xmt_bufs(ha);
2176 qla_free_rcv_bufs(ha);
2177 }
2178
2179 if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY))
2180 qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
2181
2182 } else {
2183 if (ha->msg_from_peer == QL_PEER_MSG_RESET) {
2184
2185 ha->msg_from_peer = 0;
2186
2187 if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY))
2188 qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
2189 } else {
2190 qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
2191 }
2192
2193 while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100--)
2194 qla_mdelay(__func__, 100);
2195 ha->msg_from_peer = 0;
2196
2197 if (ha->enable_driverstate_dump)
2198 ql_capture_drvr_state(ha);
2199
2200 if (msecs_100 == 0) {
2201 device_printf(ha->pci_dev,
2202 "%s: ts_usecs = %ld exit: QL_PEER_MSG_ACK not received\n",
2203 __func__, qla_get_usec_timestamp());
2204 ha->offline = 1;
2205 goto qla_error_recovery_exit;
2206 }
2207
2208 if (ql_init_hw(ha)) {
2209 device_printf(ha->pci_dev,
2210 "%s: ts_usecs = %ld exit: ql_init_hw failed\n",
2211 __func__, qla_get_usec_timestamp());
2212 ha->offline = 1;
2213 goto qla_error_recovery_exit;
2214 }
2215
2216 if (ha->qla_interface_up) {
2217 qla_free_xmt_bufs(ha);
2218 qla_free_rcv_bufs(ha);
2219 }
2220 }
2221
2222 qla_mdelay(__func__, ha->ms_delay_after_init);
2223
2224 *((uint32_t *)&ha->hw.flags) = 0;
2225 ha->qla_initiate_recovery = 0;
2226
2227 if (ha->qla_interface_up) {
2228
2229 if (qla_alloc_xmt_bufs(ha) != 0) {
2230 ha->offline = 1;
2231 goto qla_error_recovery_exit;
2232 }
2233
2234 qla_confirm_9kb_enable(ha);
2235
2236 if (qla_alloc_rcv_bufs(ha) != 0) {
2237 ha->offline = 1;
2238 goto qla_error_recovery_exit;
2239 }
2240
2241 ha->stop_rcv = 0;
2242
2243 if (ql_init_hw_if(ha) == 0) {
2244 ifp = ha->ifp;
2245 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2246 ha->qla_watchdog_pause = 0;
2247 ql_update_link_state(ha);
2248 } else {
2249 ha->offline = 1;
2250
2251 if (ha->hw.sp_log_stop_events &
2252 Q8_SP_LOG_STOP_IF_START_FAILURE)
2253 ha->hw.sp_log_stop = -1;
2254 }
2255 } else {
2256 ha->qla_watchdog_pause = 0;
2257 }
2258
2259 qla_error_recovery_exit:
2260
2261 if (ha->offline ) {
2262 device_printf(ha->pci_dev, "%s: ts_usecs = %ld port offline\n",
2263 __func__, qla_get_usec_timestamp());
2264 if (ha->hw.sp_log_stop_events &
2265 Q8_SP_LOG_STOP_ERR_RECOVERY_FAILURE)
2266 ha->hw.sp_log_stop = -1;
2267 }
2268
2269
2270 QLA_UNLOCK(ha, __func__);
2271
2272 if (!ha->offline)
2273 callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
2274 qla_watchdog, ha);
2275
2276 device_printf(ha->pci_dev,
2277 "%s: ts_usecs = %ld exit\n",
2278 __func__, qla_get_usec_timestamp());
2279 return;
2280 }
2281
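/*
 * Name: qla_async_event
 * Function: Task that handles firmware asynchronous events under the
 *	QLA lock
 */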
2282 static void
2283 qla_async_event(void *context, int pending)
2284 {
2285 qla_host_t *ha = context;
2286
2287 if (QLA_LOCK(ha, __func__, -1, 0) != 0)
2288 return;
2289
2290 if (ha->async_event) {
2291 ha->async_event = 0;
2292 qla_hw_async_event(ha);
2293 }
2294
2295 QLA_UNLOCK(ha, __func__);
2296
2297 return;
2298 }
2299
2300 static void
2301 qla_stats(void *context, int pending)
2302 {
2303 qla_host_t *ha;
2304
2305 ha = context;
2306
2307 ql_get_stats(ha);
2308
2309 return;
2310 }
2311
2312