/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * File: ql_ioctl.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_glbl.h"
#include "ql_ioctl.h"
#include "ql_ver.h"
#include "ql_dbg.h"

static int ql_slowpath_log(qla_host_t *ha, qla_sp_log_t *log);
static int ql_drvr_state(qla_host_t *ha, qla_driver_state_t *drvr_state);
static uint32_t ql_drvr_state_size(qla_host_t *ha);
static int ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td);

static struct cdevsw qla_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = ql_eioctl,
	.d_name = "qlcnic",
};

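/*
 * ql_make_cdev
 *	Create the control device node, named after the network interface,
 *	through which the ioctls below are issued.  Returns 0 on success,
 *	-1 if make_dev() fails.
 */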
int
ql_make_cdev(qla_host_t *ha)
{
	ha->ioctl_dev = make_dev(&qla_cdevsw,
				ha->ifp->if_dunit,
				UID_ROOT,
				GID_WHEEL,
				0600,
				"%s",
				if_name(ha->ifp));

	if (ha->ioctl_dev == NULL)
		return (-1);

	ha->ioctl_dev->si_drv1 = ha;

	return (0);
}

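/*
 * ql_del_cdev
 *	Destroy the control device node, if one was created.
 */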
void
ql_del_cdev(qla_host_t *ha)
{
	if (ha->ioctl_dev != NULL)
		destroy_dev(ha->ioctl_dev);
	return;
}

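/*
 * ql_eioctl
 *	ioctl entry point for the control device.  Dispatches register,
 *	flash, and off-chip memory accesses, firmware minidump retrieval,
 *	driver state and slowpath log capture, and PCI id queries.
 *
 *	Illustrative userland sketch (hypothetical, not part of this
 *	driver; the device node is named after the interface, e.g.
 *	/dev/ql0):
 *
 *		qla_rd_pci_ids_t ids;
 *		int fd = open("/dev/ql0", O_RDWR);
 *
 *		if ((fd >= 0) && (ioctl(fd, QLA_RD_PCI_IDS, &ids) == 0))
 *			printf("%04x:%04x rev 0x%x\n", ids.ven_id,
 *			    ids.dev_id, ids.rev_id);
 */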
static int
ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	qla_host_t *ha;
	int rval = 0;
	device_t pci_dev;
	struct ifnet *ifp;
	int count;

	q80_offchip_mem_val_t val;
	qla_rd_pci_ids_t *pci_ids;
	qla_rd_fw_dump_t *fw_dump;
	union {
		qla_reg_val_t *rv;
		qla_rd_flash_t *rdf;
		qla_wr_flash_t *wrf;
		qla_erase_flash_t *erf;
		qla_offchip_mem_val_t *mem;
	} u;

	if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
		return (ENXIO);

	pci_dev = ha->pci_dev;

	switch (cmd) {

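	/*
	 * 32-bit register read/write, either directly or through the
	 * indirect register interface.
	 */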
	case QLA_RDWR_REG:

		u.rv = (qla_reg_val_t *)data;

		if (u.rv->direct) {
			if (u.rv->rd) {
				u.rv->val = READ_REG32(ha, u.rv->reg);
			} else {
				WRITE_REG32(ha, u.rv->reg, u.rv->val);
			}
		} else {
			if ((rval = ql_rdwr_indreg32(ha, u.rv->reg, &u.rv->val,
			    u.rv->rd)))
				rval = ENXIO;
		}
		break;

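	/* Read a 32-bit word from flash; requires a valid FDT. */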
	case QLA_RD_FLASH:

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.rdf = (qla_rd_flash_t *)data;
		if ((rval = ql_rd_flash32(ha, u.rdf->off, &u.rdf->data)))
			rval = ENXIO;
		break;

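	/* Write a buffer to flash; the interface must be down. */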
	case QLA_WR_FLASH:

		ifp = ha->ifp;

		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			rval = ENXIO;
			break;
		}

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.wrf = (qla_wr_flash_t *)data;
		if ((rval = ql_wr_flash_buffer(ha, u.wrf->off, u.wrf->size,
		    u.wrf->buffer))) {
			printf("flash write failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

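	/* Erase a flash region; the interface must be down. */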
	case QLA_ERASE_FLASH:

		ifp = ha->ifp;

		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			rval = ENXIO;
			break;
		}

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.erf = (qla_erase_flash_t *)data;
		if ((rval = ql_erase_flash(ha, u.erf->off,
		    u.erf->size))) {
			printf("flash erase failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

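	/*
	 * Read or write an off-chip memory location; the value is
	 * carried as four 32-bit words.
	 */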
	case QLA_RDWR_MS_MEM:
		u.mem = (qla_offchip_mem_val_t *)data;

		if ((rval = ql_rdwr_offchip_mem(ha, u.mem->off, &val,
		    u.mem->rd)))
			rval = ENXIO;
		else {
			u.mem->data_lo = val.data_lo;
			u.mem->data_hi = val.data_hi;
			u.mem->data_ulo = val.data_ulo;
			u.mem->data_uhi = val.data_uhi;
		}

		break;

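	/* Report the firmware minidump size (template + capture buffer). */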
	case QLA_RD_FW_DUMP_SIZE:

		if (ha->hw.mdump_init == 0) {
			rval = EINVAL;
			break;
		}

		fw_dump = (qla_rd_fw_dump_t *)data;
		fw_dump->minidump_size = ha->hw.mdump_buffer_size +
		    ha->hw.mdump_template_size;
		fw_dump->pci_func = ha->pci_func;

		break;

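	/*
	 * Retrieve the firmware minidump (PCI function 0 only).  If the
	 * port is offline the dump is captured directly; otherwise a
	 * firmware recovery is initiated to trigger it and we poll for
	 * completion.  The template is copied out first, followed by the
	 * capture buffer.
	 */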
	case QLA_RD_FW_DUMP:

		if (ha->hw.mdump_init == 0) {
			device_printf(pci_dev,
			    "%s: minidump not initialized\n", __func__);
			rval = EINVAL;
			break;
		}

		fw_dump = (qla_rd_fw_dump_t *)data;

		if ((fw_dump->minidump == NULL) ||
		    (fw_dump->minidump_size != (ha->hw.mdump_buffer_size +
		     ha->hw.mdump_template_size))) {
			device_printf(pci_dev,
			    "%s: minidump buffer [%p] size = [%d, %d] invalid\n",
			    __func__, fw_dump->minidump, fw_dump->minidump_size,
			    (ha->hw.mdump_buffer_size +
			     ha->hw.mdump_template_size));
			rval = EINVAL;
			break;
		}

		if ((ha->pci_func & 0x1)) {
			device_printf(pci_dev,
			    "%s: minidump allowed only on Port0\n", __func__);
			rval = ENXIO;
			break;
		}

		fw_dump->saved = 1;

		if (ha->offline) {
			if (ha->enable_minidump)
				ql_minidump(ha);

			fw_dump->saved = 0;
			fw_dump->usec_ts = ha->hw.mdump_usec_ts;

			if (!ha->hw.mdump_done) {
				device_printf(pci_dev,
				    "%s: port offline minidump failed\n",
				    __func__);
				rval = ENXIO;
				break;
			}
		} else {
#define QLA_LOCK_MDUMP_MS_TIMEOUT (QLA_LOCK_DEFAULT_MS_TIMEOUT * 5)
			if (QLA_LOCK(ha, __func__, QLA_LOCK_MDUMP_MS_TIMEOUT, 0) == 0) {
				if (!ha->hw.mdump_done) {
					fw_dump->saved = 0;
					QL_INITIATE_RECOVERY(ha);
					device_printf(pci_dev,
					    "%s: recovery initiated to"
					    " trigger minidump\n", __func__);
				}
				QLA_UNLOCK(ha, __func__);
			} else {
				device_printf(pci_dev,
				    "%s: QLA_LOCK() failed0\n", __func__);
				rval = ENXIO;
				break;
			}

#define QLNX_DUMP_WAIT_SECS 30

			count = QLNX_DUMP_WAIT_SECS * 1000;

			while (count) {
				if (ha->hw.mdump_done)
					break;
				qla_mdelay(__func__, 100);
				count -= 100;
			}

			if (!ha->hw.mdump_done) {
				device_printf(pci_dev,
				    "%s: port not offline minidump failed\n",
				    __func__);
				rval = ENXIO;
				break;
			}
			fw_dump->usec_ts = ha->hw.mdump_usec_ts;

			if (QLA_LOCK(ha, __func__, QLA_LOCK_MDUMP_MS_TIMEOUT, 0) == 0) {
				ha->hw.mdump_done = 0;
				QLA_UNLOCK(ha, __func__);
			} else {
				device_printf(pci_dev,
				    "%s: QLA_LOCK() failed1\n", __func__);
				rval = ENXIO;
				break;
			}
		}

		if ((rval = copyout(ha->hw.mdump_template,
		    fw_dump->minidump, ha->hw.mdump_template_size))) {
			device_printf(pci_dev,
			    "%s: template copyout failed\n", __func__);
			rval = ENXIO;
			break;
		}

		if ((rval = copyout(ha->hw.mdump_buffer,
		    ((uint8_t *)fw_dump->minidump +
		     ha->hw.mdump_template_size),
		    ha->hw.mdump_buffer_size))) {
			device_printf(pci_dev,
			    "%s: minidump copyout failed\n", __func__);
			rval = ENXIO;
		}
		break;

	case QLA_RD_DRVR_STATE:
		rval = ql_drvr_state(ha, (qla_driver_state_t *)data);
		break;

	case QLA_RD_SLOWPATH_LOG:
		rval = ql_slowpath_log(ha, (qla_sp_log_t *)data);
		break;

	case QLA_RD_PCI_IDS:
		pci_ids = (qla_rd_pci_ids_t *)data;
		pci_ids->ven_id = pci_get_vendor(pci_dev);
		pci_ids->dev_id = pci_get_device(pci_dev);
		pci_ids->subsys_ven_id = pci_get_subvendor(pci_dev);
		pci_ids->subsys_dev_id = pci_get_subdevice(pci_dev);
		pci_ids->rev_id = pci_read_config(pci_dev, PCIR_REVID, 1);
		break;

	default:
		break;
	}

	return (rval);
}

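/*
 * ql_drvr_state
 *	Copy a snapshot of driver state out to the caller.  When the
 *	caller passes a NULL buffer, only the required size is returned,
 *	allowing a two-step size-then-fetch sequence from userland.
 */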
static int
ql_drvr_state(qla_host_t *ha, qla_driver_state_t *state)
{
	int rval = 0;
	uint32_t drvr_state_size;

	drvr_state_size = ql_drvr_state_size(ha);

	if (state->buffer == NULL) {
		state->size = drvr_state_size;
		return (0);
	}

	if (state->size < drvr_state_size)
		return (ENXIO);

	if (ha->hw.drvr_state == NULL)
		return (ENOMEM);

	ql_capture_drvr_state(ha);

	rval = copyout(ha->hw.drvr_state, state->buffer, drvr_state_size);

	bzero(ha->hw.drvr_state, drvr_state_size);

	return (rval);
}

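/*
 * ql_drvr_state_size
 *	Compute the size of the driver-state snapshot: the header, the
 *	per-ring software state, and the raw Tx/Rx/status descriptor
 *	rings, each component rounded up to a 64-byte boundary.
 */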
static uint32_t
ql_drvr_state_size(qla_host_t *ha)
{
	uint32_t drvr_state_size;
	uint32_t size;

	size = sizeof (qla_drvr_state_hdr_t);
	drvr_state_size = QL_ALIGN(size, 64);

	size = ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
	drvr_state_size += QL_ALIGN(size, 64);

	size = ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
	drvr_state_size += QL_ALIGN(size, 64);

	size = ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
	drvr_state_size += QL_ALIGN(size, 64);

	size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS * ha->hw.num_tx_rings;
	drvr_state_size += QL_ALIGN(size, 64);

	size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS *
	    ha->hw.num_rds_rings;
	drvr_state_size += QL_ALIGN(size, 64);

	size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS *
	    ha->hw.num_sds_rings;
	drvr_state_size += QL_ALIGN(size, 64);

	return (drvr_state_size);
}

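/* Snapshot the software state of each Tx ring. */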
static void
ql_get_tx_state(qla_host_t *ha, qla_drvr_state_tx_t *tx_state)
{
	int i;

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_state->base_p_addr = ha->hw.tx_cntxt[i].tx_ring_paddr;
		tx_state->cons_p_addr = ha->hw.tx_cntxt[i].tx_cons_paddr;
		tx_state->tx_prod_reg = ha->hw.tx_cntxt[i].tx_prod_reg;
		tx_state->tx_cntxt_id = ha->hw.tx_cntxt[i].tx_cntxt_id;
		tx_state->txr_free = ha->hw.tx_cntxt[i].txr_free;
		tx_state->txr_next = ha->hw.tx_cntxt[i].txr_next;
		tx_state->txr_comp = ha->hw.tx_cntxt[i].txr_comp;
		tx_state++;
	}
	return;
}

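/* Snapshot the software state of each Rx (receive descriptor) ring. */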
static void
ql_get_rx_state(qla_host_t *ha, qla_drvr_state_rx_t *rx_state)
{
	int i;

	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		rx_state->prod_std = ha->hw.rds[i].prod_std;
		rx_state->rx_next = ha->hw.rds[i].rx_next;
		rx_state++;
	}
	return;
}

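/* Snapshot the software state of each status (SDS) ring. */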
static void
ql_get_sds_state(qla_host_t *ha, qla_drvr_state_sds_t *sds_state)
{
	int i;

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		sds_state->sdsr_next = ha->hw.sds[i].sdsr_next;
		sds_state->sds_consumer = ha->hw.sds[i].sds_consumer;
		sds_state++;
	}
	return;
}

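/*
 * ql_capture_drvr_state
 *	Fill the preallocated driver-state buffer with a header, the
 *	per-ring software state, and raw copies of the descriptor rings.
 *	A nonzero major version in the header marks an existing capture;
 *	in that case the buffer is left intact and only hdr->saved is set.
 */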
void
ql_capture_drvr_state(qla_host_t *ha)
{
	uint8_t *state_buffer;
	uint8_t *ptr;
	qla_drvr_state_hdr_t *hdr;
	uint32_t size;
	int i;

	state_buffer = ha->hw.drvr_state;

	if (state_buffer == NULL)
		return;

	hdr = (qla_drvr_state_hdr_t *)state_buffer;

	hdr->saved = 0;

	if (hdr->drvr_version_major) {
		hdr->saved = 1;
		return;
	}

	hdr->usec_ts = qla_get_usec_timestamp();

	hdr->drvr_version_major = QLA_VERSION_MAJOR;
	hdr->drvr_version_minor = QLA_VERSION_MINOR;
	hdr->drvr_version_build = QLA_VERSION_BUILD;

	bcopy(ha->hw.mac_addr, hdr->mac_addr, ETHER_ADDR_LEN);

	hdr->link_speed = ha->hw.link_speed;
	hdr->cable_length = ha->hw.cable_length;
	hdr->cable_oui = ha->hw.cable_oui;
	hdr->link_up = ha->hw.link_up;
	hdr->module_type = ha->hw.module_type;
	hdr->link_faults = ha->hw.link_faults;
	hdr->rcv_intr_coalesce = ha->hw.rcv_intr_coalesce;
	hdr->xmt_intr_coalesce = ha->hw.xmt_intr_coalesce;

	size = sizeof (qla_drvr_state_hdr_t);
	hdr->tx_state_offset = QL_ALIGN(size, 64);

	ptr = state_buffer + hdr->tx_state_offset;

	ql_get_tx_state(ha, (qla_drvr_state_tx_t *)ptr);

	size = ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
	hdr->rx_state_offset = hdr->tx_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->rx_state_offset;

	ql_get_rx_state(ha, (qla_drvr_state_rx_t *)ptr);

	size = ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
	hdr->sds_state_offset = hdr->rx_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->sds_state_offset;

	ql_get_sds_state(ha, (qla_drvr_state_sds_t *)ptr);

	size = ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
	hdr->txr_offset = hdr->sds_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->txr_offset;

	hdr->num_tx_rings = ha->hw.num_tx_rings;
	hdr->txr_size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS;
	hdr->txr_entries = NUM_TX_DESCRIPTORS;

	size = hdr->num_tx_rings * hdr->txr_size;
	bcopy(ha->hw.dma_buf.tx_ring.dma_b, ptr, size);

	hdr->rxr_offset = hdr->txr_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->rxr_offset;

	hdr->rxr_size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS;
	hdr->rxr_entries = NUM_RX_DESCRIPTORS;
	hdr->num_rx_rings = ha->hw.num_rds_rings;

	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		bcopy(ha->hw.dma_buf.rds_ring[i].dma_b, ptr, hdr->rxr_size);
		ptr += hdr->rxr_size;
	}

	size = hdr->rxr_size * hdr->num_rx_rings;
	hdr->sds_offset = hdr->rxr_offset + QL_ALIGN(size, 64);
	hdr->sds_ring_size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS;
	hdr->sds_entries = NUM_STATUS_DESCRIPTORS;
	hdr->num_sds_rings = ha->hw.num_sds_rings;

	ptr = state_buffer + hdr->sds_offset;
	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		bcopy(ha->hw.dma_buf.sds_ring[i].dma_b, ptr,
		    hdr->sds_ring_size);
		ptr += hdr->sds_ring_size;
	}
	return;
}

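/* Allocate and zero the driver-state snapshot buffer. */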
void
ql_alloc_drvr_state_buffer(qla_host_t *ha)
{
	uint32_t drvr_state_size;

	drvr_state_size = ql_drvr_state_size(ha);

	ha->hw.drvr_state = malloc(drvr_state_size, M_QLA83XXBUF, M_NOWAIT);

	if (ha->hw.drvr_state != NULL)
		bzero(ha->hw.drvr_state, drvr_state_size);

	return;
}

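/* Free the driver-state snapshot buffer, if allocated. */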
void
ql_free_drvr_state_buffer(qla_host_t *ha)
{
	if (ha->hw.drvr_state != NULL)
		free(ha->hw.drvr_state, M_QLA83XXBUF);
	return;
}

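/*
 * ql_sp_log
 *	Append an entry to the slowpath log: a circular buffer of
 *	NUM_LOG_ENTRIES timestamped records, each identifying a format
 *	string by index plus up to five parameters.
 */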
void
ql_sp_log(qla_host_t *ha, uint16_t fmtstr_idx, uint16_t num_params,
    uint32_t param0, uint32_t param1, uint32_t param2, uint32_t param3,
    uint32_t param4)
{
	qla_sp_log_entry_t *sp_e, *sp_log;

	if (((sp_log = ha->hw.sp_log) == NULL) || ha->hw.sp_log_stop)
		return;

	mtx_lock(&ha->sp_log_lock);

	sp_e = &sp_log[ha->hw.sp_log_index];

	bzero(sp_e, sizeof (qla_sp_log_entry_t));

	sp_e->fmtstr_idx = fmtstr_idx;
	sp_e->num_params = num_params;

	sp_e->usec_ts = qla_get_usec_timestamp();

	sp_e->params[0] = param0;
	sp_e->params[1] = param1;
	sp_e->params[2] = param2;
	sp_e->params[3] = param3;
	sp_e->params[4] = param4;

	ha->hw.sp_log_index = (ha->hw.sp_log_index + 1) & (NUM_LOG_ENTRIES - 1);

	if (ha->hw.sp_log_num_entries < NUM_LOG_ENTRIES)
		ha->hw.sp_log_num_entries++;

	mtx_unlock(&ha->sp_log_lock);

	return;
}

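/* Allocate and zero the slowpath log; reset its ring indices. */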
void
ql_alloc_sp_log_buffer(qla_host_t *ha)
{
	uint32_t size;

	size = (sizeof(qla_sp_log_entry_t)) * NUM_LOG_ENTRIES;

	ha->hw.sp_log = malloc(size, M_QLA83XXBUF, M_NOWAIT);

	if (ha->hw.sp_log != NULL)
		bzero(ha->hw.sp_log, size);

	ha->hw.sp_log_index = 0;
	ha->hw.sp_log_num_entries = 0;

	return;
}

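/* Free the slowpath log buffer, if allocated. */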
void
ql_free_sp_log_buffer(qla_host_t *ha)
{
	if (ha->hw.sp_log != NULL)
		free(ha->hw.sp_log, M_QLA83XXBUF);
	return;
}

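/*
 * ql_slowpath_log
 *	Copy the entire slowpath log out to the caller, along with the
 *	next write index and the number of valid entries, under the log
 *	lock so a consistent snapshot is returned.
 */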
static int
ql_slowpath_log(qla_host_t *ha, qla_sp_log_t *log)
{
	int rval = 0;
	uint32_t size;

	if ((ha->hw.sp_log == NULL) || (log->buffer == NULL))
		return (EINVAL);

	size = (sizeof(qla_sp_log_entry_t) * NUM_LOG_ENTRIES);

	mtx_lock(&ha->sp_log_lock);

	rval = copyout(ha->hw.sp_log, log->buffer, size);

	if (!rval) {
		log->next_idx = ha->hw.sp_log_index;
		log->num_entries = ha->hw.sp_log_num_entries;
	}
	device_printf(ha->pci_dev,
	    "%s: exit [rval = %d][%p, next_idx = %d, %d entries, %d bytes]\n",
	    __func__, rval, log->buffer, log->next_idx, log->num_entries,
	    size);
	mtx_unlock(&ha->sp_log_lock);

	return (rval);
}