1 /*
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2016-2023, Broadcom Inc. All rights reserved.
5 * Support: <[email protected]>
6 *
7 * Authors: Sumit Saxena <[email protected]>
8 * Chandrakanth Patil <[email protected]>
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are
12 * met:
13 *
14 * 1. Redistributions of source code must retain the above copyright notice,
15 * this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright notice,
17 * this list of conditions and the following disclaimer in the documentation and/or other
18 * materials provided with the distribution.
19 * 3. Neither the name of the Broadcom Inc. nor the names of its contributors
20 * may be used to endorse or promote products derived from this software without
21 * specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
34 *
35 * The views and conclusions contained in the software and documentation are
36 * those of the authors and should not be interpreted as representing
37 * official policies,either expressed or implied, of the FreeBSD Project.
38 *
39 * Mail to: Broadcom Inc 1320 Ridder Park Dr, San Jose, CA 95131
40 *
41 * Broadcom Inc. (Broadcom) MPI3MR Adapter FreeBSD
42 */
43
44 #include <sys/cdefs.h>
45 #include <sys/types.h>
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/module.h>
50 #include <sys/bus.h>
51 #include <sys/conf.h>
52 #include <sys/malloc.h>
53 #include <sys/sysctl.h>
54 #include <sys/uio.h>
55
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59
60 #include <dev/pci/pcireg.h>
61 #include <dev/pci/pcivar.h>
62 #include <dev/pci/pci_private.h>
63
64 #include <cam/cam.h>
65 #include <cam/cam_ccb.h>
66 #include <cam/cam_debug.h>
67 #include <cam/cam_sim.h>
68 #include <cam/cam_xpt_sim.h>
69 #include <cam/cam_xpt_periph.h>
70 #include <cam/cam_periph.h>
71 #include <cam/scsi/scsi_all.h>
72 #include <cam/scsi/scsi_message.h>
73 #include <cam/scsi/smp_all.h>
74 #include <sys/queue.h>
75 #include <sys/kthread.h>
76 #include "mpi3mr.h"
77 #include "mpi3mr_cam.h"
78 #include "mpi3mr_app.h"
79
80 static void mpi3mr_repost_reply_buf(struct mpi3mr_softc *sc,
81 U64 reply_dma);
82 static int mpi3mr_complete_admin_cmd(struct mpi3mr_softc *sc);
83 static void mpi3mr_port_enable_complete(struct mpi3mr_softc *sc,
84 struct mpi3mr_drvr_cmd *drvrcmd);
85 static void mpi3mr_flush_io(struct mpi3mr_softc *sc);
86 static int mpi3mr_issue_reset(struct mpi3mr_softc *sc, U16 reset_type,
87 U32 reset_reason);
88 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_softc *sc, U16 handle,
89 struct mpi3mr_drvr_cmd *cmdparam, U8 iou_rc);
90 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_softc *sc,
91 struct mpi3mr_drvr_cmd *drv_cmd);
92 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_softc *sc,
93 struct mpi3mr_drvr_cmd *drv_cmd);
94 static void mpi3mr_send_evt_ack(struct mpi3mr_softc *sc, U8 event,
95 struct mpi3mr_drvr_cmd *cmdparam, U32 event_ctx);
96 static void mpi3mr_print_fault_info(struct mpi3mr_softc *sc);
97 static inline void mpi3mr_set_diagsave(struct mpi3mr_softc *sc);
98 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code);
99
100 void
mpi3mr_hexdump(void * buf,int sz,int format)101 mpi3mr_hexdump(void *buf, int sz, int format)
102 {
103 int i;
104 U32 *buf_loc = (U32 *)buf;
105
106 for (i = 0; i < (sz / sizeof(U32)); i++) {
107 if ((i % format) == 0) {
108 if (i != 0)
109 printf("\n");
110 printf("%08x: ", (i * 4));
111 }
112 printf("%08x ", buf_loc[i]);
113 }
114 printf("\n");
115 }
116
117 void
init_completion(struct completion * completion)118 init_completion(struct completion *completion)
119 {
120 completion->done = 0;
121 }
122
123 void
complete(struct completion * completion)124 complete(struct completion *completion)
125 {
126 completion->done = 1;
127 wakeup(complete);
128 }
129
/*
 * Busy-wait (1 ms DELAY granularity) until the completion is signalled or
 * 'timeout' seconds elapse; on expiry, log and force the done flag so the
 * caller does not wait again.
 */
void wait_for_completion_timeout(struct completion *completion,
    U32 timeout)
{
    U32 remaining = timeout * 1000;    /* poll budget in milliseconds */

    while (remaining != 0 && completion->done == 0) {
        DELAY(1000);
        remaining--;
    }

    if (completion->done == 0) {
        printf("%s: Command is timedout\n", __func__);
        completion->done = 1;
    }
}
/*
 * Sleep-wait for a task-management completion. Unlike the polled variant
 * above, this sleeps on sc->tm_chan under sc->mpi3mr_mtx.
 */
void wait_for_completion_timeout_tm(struct completion *completion,
    U32 timeout, struct mpi3mr_softc *sc)
{
    U32 count = timeout * 1000;

    /*
     * Each msleep() returns on a wakeup of sc->tm_chan or after 1 * hz
     * ticks (1 second).
     * NOTE(review): count decrements once per msleep() return, so with no
     * wakeups the worst case is timeout * 1000 seconds, not milliseconds
     * -- confirm the intended units against callers.
     */
    while ((completion->done == 0) && count) {
        msleep(&sc->tm_chan, &sc->mpi3mr_mtx, PRIBIO,
            "TM command", 1 * hz);
        count--;
    }

    /* Timed out: log and force-complete so the caller does not hang. */
    if (completion->done == 0) {
        printf("%s: Command is timedout\n", __func__);
        completion->done = 1;
    }
}
161
162
163 void
poll_for_command_completion(struct mpi3mr_softc * sc,struct mpi3mr_drvr_cmd * cmd,U16 wait)164 poll_for_command_completion(struct mpi3mr_softc *sc,
165 struct mpi3mr_drvr_cmd *cmd, U16 wait)
166 {
167 int wait_time = wait * 1000;
168 while (wait_time) {
169 mpi3mr_complete_admin_cmd(sc);
170 if (cmd->state & MPI3MR_CMD_COMPLETE)
171 break;
172 DELAY(1000);
173 wait_time--;
174 }
175 }
176
177 /**
178 * mpi3mr_trigger_snapdump - triggers firmware snapdump
179 * @sc: Adapter instance reference
180 * @reason_code: reason code for the fault.
181 *
182 * This routine will trigger the snapdump and wait for it to
183 * complete or timeout before it returns.
 * This will be called during initialization time faults/resets/timeouts
185 * before soft reset invocation.
186 *
187 * Return: None.
188 */
static void
mpi3mr_trigger_snapdump(struct mpi3mr_softc *sc, U32 reason_code)
{
    /* Poll budget: MPI3_SYSIF_DIAG_SAVE_TIMEOUT seconds in 100 ms steps. */
    U32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;

    mpi3mr_dprint(sc, MPI3MR_INFO, "snapdump triggered: reason code: %s\n",
        mpi3mr_reset_rc_name(reason_code));

    /* Set the diag-save bit, then diag-fault the IOC so firmware starts
     * capturing its snapdump. */
    mpi3mr_set_diagsave(sc);
    mpi3mr_issue_reset(sc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
        reason_code);

    /* Wait for firmware to clear SAVE_IN_PROGRESS in the host diag
     * register, or give up once the poll budget is exhausted. */
    do {
        host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
        if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
            break;
        DELAY(100 * 1000);
    } while (--timeout);

    return;
}
210
211 /**
212 * mpi3mr_check_rh_fault_ioc - check reset history and fault
213 * controller
214 * @sc: Adapter instance reference
 * @reason_code: reason code for the fault.
 *
 * This routine will fault the controller with
 * the given reason code if it is not already in the fault or
 * not asynchronously reset. This will be used to handle
 * initialization time faults/resets/timeouts as in those cases
 * immediate soft reset invocation is not required.
222 *
223 * Return: None.
224 */
static void mpi3mr_check_rh_fault_ioc(struct mpi3mr_softc *sc, U32 reason_code)
{
    U32 ioc_status;

    /* Nothing to do if the controller is already beyond recovery. */
    if (sc->unrecoverable) {
        mpi3mr_dprint(sc, MPI3MR_ERROR, "controller is unrecoverable\n");
        return;
    }

    /* If the IOC already shows a reset history or a fault, just report
     * the fault information and leave it as-is. */
    ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
    if (ioc_status & (MPI3_SYSIF_IOC_STATUS_RESET_HISTORY |
        MPI3_SYSIF_IOC_STATUS_FAULT)) {
        mpi3mr_print_fault_info(sc);
        return;
    }

    /* Otherwise fault the controller ourselves to collect a snapdump. */
    mpi3mr_trigger_snapdump(sc, reason_code);
}
245
/*
 * Translate a reply buffer bus address back to its host virtual address.
 * Returns NULL for a zero address or one outside the reply pool range.
 */
static void * mpi3mr_get_reply_virt_addr(struct mpi3mr_softc *sc,
    bus_addr_t phys_addr)
{
    bus_addr_t offset;

    if (phys_addr == 0)
        return NULL;
    if (phys_addr < sc->reply_buf_dma_min_address ||
        phys_addr > sc->reply_buf_dma_max_address)
        return NULL;

    offset = phys_addr - sc->reply_buf_phys;
    return sc->reply_buf + offset;
}
257
/*
 * Translate a sense buffer bus address back to its host virtual address;
 * NULL input maps to NULL (no range check, unlike the reply pool lookup).
 */
static void * mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_softc *sc,
    bus_addr_t phys_addr)
{
    bus_addr_t offset;

    if (phys_addr == 0)
        return NULL;

    offset = phys_addr - sc->sense_buf_phys;
    return sc->sense_buf + offset;
}
265
/*
 * Return a consumed reply buffer to the reply free queue and publish the
 * new host index to the controller.
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_softc *sc,
    U64 reply_dma)
{
    U32 slot;

    mtx_lock_spin(&sc->reply_free_q_lock);
    slot = sc->reply_free_q_host_index;
    /* Advance the host index with wraparound. */
    if (sc->reply_free_q_host_index == (sc->reply_free_q_sz - 1))
        sc->reply_free_q_host_index = 0;
    else
        sc->reply_free_q_host_index = sc->reply_free_q_host_index + 1;
    sc->reply_free_q[slot] = reply_dma;
    mpi3mr_regwrite(sc, MPI3_SYSIF_REPLY_FREE_HOST_INDEX_OFFSET,
        sc->reply_free_q_host_index);
    mtx_unlock_spin(&sc->reply_free_q_lock);
}
281
/*
 * Return a consumed sense buffer to the sense-buffer free queue and
 * publish the new host index to the controller.
 */
static void mpi3mr_repost_sense_buf(struct mpi3mr_softc *sc,
    U64 sense_buf_phys)
{
    U32 slot;

    mtx_lock_spin(&sc->sense_buf_q_lock);
    slot = sc->sense_buf_q_host_index;
    /* Advance the host index with wraparound. */
    if (sc->sense_buf_q_host_index == (sc->sense_buf_q_sz - 1))
        sc->sense_buf_q_host_index = 0;
    else
        sc->sense_buf_q_host_index = sc->sense_buf_q_host_index + 1;
    sc->sense_buf_q[slot] = sense_buf_phys;
    mpi3mr_regwrite(sc, MPI3_SYSIF_SENSE_BUF_FREE_HOST_INDEX_OFFSET,
        sc->sense_buf_q_host_index);
    mtx_unlock_spin(&sc->sense_buf_q_lock);
}
298
/*
 * Set the io_divert flag to 'divert_value' on every target that belongs
 * to the given throttle group.
 */
void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_softc *sc,
    struct mpi3mr_throttle_group_info *tg, U8 divert_value)
{
    struct mpi3mr_target *tgt;

    mtx_lock_spin(&sc->target_lock);
    TAILQ_FOREACH(tgt, &sc->cam_sc->tgt_list, tgt_next) {
        if (tgt->throttle_group != tg)
            continue;
        tgt->io_divert = divert_value;
    }
    mtx_unlock_spin(&sc->target_lock);
}
311
312 /**
313 * mpi3mr_submit_admin_cmd - Submit request to admin queue
 * @sc: Adapter instance reference
315 * @admin_req: MPI3 request
316 * @admin_req_sz: Request size
317 *
318 * Post the MPI3 request into admin request queue and
319 * inform the controller, if the queue is full return
320 * appropriate error.
321 *
322 * Return: 0 on success, non-zero on failure.
323 */
int mpi3mr_submit_admin_cmd(struct mpi3mr_softc *sc, void *admin_req,
    U16 admin_req_sz)
{
    U16 areq_pi = 0, areq_ci = 0, max_entries = 0;
    int retval = 0;
    U8 *areq_entry;

    mtx_lock_spin(&sc->admin_req_lock);
    areq_pi = sc->admin_req_pi;
    areq_ci = sc->admin_req_ci;
    max_entries = sc->num_admin_reqs;

    if (sc->unrecoverable) {
        /*
         * BUGFIX: this path used to "return -EFAULT" directly while
         * still holding admin_req_lock, leaking the spin lock. Exit
         * through the common unlock path instead.
         */
        retval = -EFAULT;
        goto out;
    }

    /* Queue is full when PI would catch up to CI (with wraparound). */
    if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
        (areq_pi == (max_entries - 1)))) {
        printf(IOCNAME "AdminReqQ full condition detected\n",
            sc->name);
        retval = -EAGAIN;
        goto out;
    }
    /* Copy the request into the next free fixed-size admin frame. */
    areq_entry = (U8 *)sc->admin_req + (areq_pi *
        MPI3MR_AREQ_FRAME_SZ);
    memset(areq_entry, 0, MPI3MR_AREQ_FRAME_SZ);
    memcpy(areq_entry, (U8 *)admin_req, admin_req_sz);

    if (++areq_pi == max_entries)
        areq_pi = 0;
    sc->admin_req_pi = areq_pi;

    /* Inform the controller of the new producer index. */
    mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET, sc->admin_req_pi);

out:
    mtx_unlock_spin(&sc->admin_req_lock);
    return retval;
}
361
362 /**
363 * mpi3mr_check_req_qfull - Check request queue is full or not
 * @op_req_q: Operational request queue info
365 *
366 * Return: true when queue full, false otherwise.
367 */
368 static inline bool
mpi3mr_check_req_qfull(struct mpi3mr_op_req_queue * op_req_q)369 mpi3mr_check_req_qfull(struct mpi3mr_op_req_queue *op_req_q)
370 {
371 U16 pi, ci, max_entries;
372 bool is_qfull = false;
373
374 pi = op_req_q->pi;
375 ci = op_req_q->ci;
376 max_entries = op_req_q->num_reqs;
377
378 if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
379 is_qfull = true;
380
381 return is_qfull;
382 }
383
384 /**
385 * mpi3mr_submit_io - Post IO command to firmware
386 * @sc: Adapter instance reference
387 * @op_req_q: Operational Request queue reference
388 * @req: MPT request data
389 *
390 * This function submits IO command to firmware.
391 *
392 * Return: Nothing
393 */
int mpi3mr_submit_io(struct mpi3mr_softc *sc,
    struct mpi3mr_op_req_queue *op_req_q, U8 *req)
{
    U16 pi, max_entries;
    int retval = 0;
    U8 *req_entry;
    U16 req_sz = sc->facts.op_req_sz;
    struct mpi3mr_irq_context *irq_ctx;

    mtx_lock_spin(&op_req_q->q_lock);

    pi = op_req_q->pi;
    max_entries = op_req_q->num_reqs;
    if (mpi3mr_check_req_qfull(op_req_q)) {
        /* Queue full: drain completions on the paired reply queue once,
         * then re-check before giving up with -EBUSY. */
        irq_ctx = &sc->irq_ctx[op_req_q->reply_qid - 1];
        mpi3mr_complete_io_cmd(sc, irq_ctx);

        if (mpi3mr_check_req_qfull(op_req_q)) {
            printf(IOCNAME "OpReqQ full condition detected\n",
                sc->name);
            retval = -EBUSY;
            goto out;
        }
    }

    /* Copy the request into the next free slot. The full op_req_sz slot
     * is zeroed but only MPI3MR_AREQ_FRAME_SZ bytes are copied from the
     * caller's frame. */
    req_entry = (U8 *)op_req_q->q_base + (pi * req_sz);
    memset(req_entry, 0, req_sz);
    memcpy(req_entry, req, MPI3MR_AREQ_FRAME_SZ);
    if (++pi == max_entries)
        pi = 0;
    op_req_q->pi = pi;

    /* Account the in-flight IO against the paired reply queue. */
    mpi3mr_atomic_inc(&sc->op_reply_q[op_req_q->reply_qid - 1].pend_ios);

    /* Ring the per-queue doorbell with the new producer index. */
    mpi3mr_regwrite(sc, MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(op_req_q->qid), op_req_q->pi);
    if (sc->mpi3mr_debug & MPI3MR_TRACE) {
        device_printf(sc->mpi3mr_dev, "IO submission: QID:%d PI:0x%x\n", op_req_q->qid, op_req_q->pi);
        mpi3mr_hexdump(req_entry, MPI3MR_AREQ_FRAME_SZ, 8);
    }

out:
    mtx_unlock_spin(&op_req_q->q_lock);
    return retval;
}
438
439 inline void
mpi3mr_add_sg_single(void * paddr,U8 flags,U32 length,bus_addr_t dma_addr)440 mpi3mr_add_sg_single(void *paddr, U8 flags, U32 length,
441 bus_addr_t dma_addr)
442 {
443 Mpi3SGESimple_t *sgel = paddr;
444
445 sgel->Flags = flags;
446 sgel->Length = (length);
447 sgel->Address = (U64)dma_addr;
448 }
449
mpi3mr_build_zero_len_sge(void * paddr)450 void mpi3mr_build_zero_len_sge(void *paddr)
451 {
452 U8 sgl_flags = (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
453 MPI3_SGE_FLAGS_DLAS_SYSTEM | MPI3_SGE_FLAGS_END_OF_LIST);
454
455 mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
456
457 }
458
mpi3mr_enable_interrupts(struct mpi3mr_softc * sc)459 void mpi3mr_enable_interrupts(struct mpi3mr_softc *sc)
460 {
461 sc->intr_enabled = 1;
462 }
463
mpi3mr_disable_interrupts(struct mpi3mr_softc * sc)464 void mpi3mr_disable_interrupts(struct mpi3mr_softc *sc)
465 {
466 sc->intr_enabled = 0;
467 }
468
469 void
mpi3mr_memaddr_cb(void * arg,bus_dma_segment_t * segs,int nsegs,int error)470 mpi3mr_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
471 {
472 bus_addr_t *addr;
473
474 addr = arg;
475 *addr = segs[0].ds_addr;
476 }
477
/**
 * mpi3mr_delete_op_reply_queue - delete operational reply queue
 * @sc: Adapter instance reference
 * @qid: operational reply queue id
 *
 * Delete the operational reply queue by issuing an MPI request
 * through the admin queue, then release the queue's DMA resources.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_delete_op_reply_queue(struct mpi3mr_softc *sc, U16 qid)
{
    Mpi3DeleteReplyQueueRequest_t delq_req;
    struct mpi3mr_op_reply_queue *op_reply_q;
    int retval = 0;

    op_reply_q = &sc->op_reply_q[qid - 1];

    if (!op_reply_q->qid)
    {
        retval = -1;
        printf(IOCNAME "Issue DelRepQ: called with invalid Reply QID\n",
            sc->name);
        goto out;
    }

    memset(&delq_req, 0, sizeof(delq_req));

    mtx_lock(&sc->init_cmds.completion.lock);
    if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
        retval = -1;
        printf(IOCNAME "Issue DelRepQ: Init command is in use\n",
            sc->name);
        mtx_unlock(&sc->init_cmds.completion.lock);
        goto out;
    }

    /*
     * BUGFIX: a duplicated MPI3MR_CMD_PENDING check was removed here.
     * It was unreachable dead code (the state is tested immediately
     * above), and had it ever fired it jumped to "out" while still
     * holding init_cmds.completion.lock.
     */
    sc->init_cmds.state = MPI3MR_CMD_PENDING;
    sc->init_cmds.is_waiting = 1;
    sc->init_cmds.callback = NULL;
    delq_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
    delq_req.Function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
    delq_req.QueueID = qid;

    init_completion(&sc->init_cmds.completion);
    retval = mpi3mr_submit_admin_cmd(sc, &delq_req, sizeof(delq_req));
    if (retval) {
        printf(IOCNAME "Issue DelRepQ: Admin Post failed\n",
            sc->name);
        goto out_unlock;
    }
    wait_for_completion_timeout(&sc->init_cmds.completion,
        (MPI3MR_INTADMCMD_TIMEOUT));
    if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
        printf(IOCNAME "Issue DelRepQ: command timed out\n",
            sc->name);
        mpi3mr_check_rh_fault_ioc(sc,
            MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
        sc->unrecoverable = 1;

        retval = -1;
        goto out_unlock;
    }
    if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
        != MPI3_IOCSTATUS_SUCCESS ) {
        printf(IOCNAME "Issue DelRepQ: Failed IOCStatus(0x%04x) "
            " Loginfo(0x%08x) \n" , sc->name,
            (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
            sc->init_cmds.ioc_loginfo);
        retval = -1;
        goto out_unlock;
    }
    sc->irq_ctx[qid - 1].op_reply_q = NULL;

    /* Tear down the queue's DMA map, memory and tag. */
    if (sc->op_reply_q[qid - 1].q_base_phys != 0)
        bus_dmamap_unload(sc->op_reply_q[qid - 1].q_base_tag, sc->op_reply_q[qid - 1].q_base_dmamap);
    if (sc->op_reply_q[qid - 1].q_base != NULL)
        bus_dmamem_free(sc->op_reply_q[qid - 1].q_base_tag, sc->op_reply_q[qid - 1].q_base, sc->op_reply_q[qid - 1].q_base_dmamap);
    if (sc->op_reply_q[qid - 1].q_base_tag != NULL)
        bus_dma_tag_destroy(sc->op_reply_q[qid - 1].q_base_tag);

    sc->op_reply_q[qid - 1].q_base = NULL;
    sc->op_reply_q[qid - 1].qid = 0;
out_unlock:
    sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
    mtx_unlock(&sc->init_cmds.completion.lock);
out:
    return retval;
}
564
565 /**
566 * mpi3mr_create_op_reply_queue - create operational reply queue
567 * @sc: Adapter instance reference
568 * @qid: operational reply queue id
569 *
 * Create operational reply queue by issuing MPI request
571 * through admin queue.
572 *
573 * Return: 0 on success, non-zero on failure.
574 */
static int mpi3mr_create_op_reply_queue(struct mpi3mr_softc *sc, U16 qid)
{
    Mpi3CreateReplyQueueRequest_t create_req;
    struct mpi3mr_op_reply_queue *op_reply_q;
    int retval = 0;
    char q_lock_name[32];

    op_reply_q = &sc->op_reply_q[qid - 1];

    if (op_reply_q->qid)
    {
        retval = -1;
        printf(IOCNAME "CreateRepQ: called for duplicate qid %d\n",
            sc->name, op_reply_q->qid);
        return retval;
    }

    /* A0 revision silicon uses a smaller reply queue depth. */
    op_reply_q->ci = 0;
    if (pci_get_revid(sc->mpi3mr_dev) == SAS4116_CHIP_REV_A0)
        op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD_A0;
    else
        op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;

    op_reply_q->qsz = op_reply_q->num_replies * sc->op_reply_sz;
    op_reply_q->ephase = 1;

    /* Allocate queue DMA memory only on first creation; on re-creation
     * after a controller reset the previous allocation is reused. */
    if (!op_reply_q->q_base) {
        snprintf(q_lock_name, 32, "Reply Queue Lock[%d]", qid);
        mtx_init(&op_reply_q->q_lock, q_lock_name, NULL, MTX_SPIN);

        if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
            4, 0,                       /* algnmnt, boundary */
            sc->dma_loaddr,             /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            op_reply_q->qsz,            /* maxsize */
            1,                          /* nsegments */
            op_reply_q->qsz,            /* maxsegsize */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockarg */
            &op_reply_q->q_base_tag)) {
            mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Operational reply DMA tag\n");
            return (ENOMEM);
        }

        if (bus_dmamem_alloc(op_reply_q->q_base_tag, (void **)&op_reply_q->q_base,
            BUS_DMA_NOWAIT, &op_reply_q->q_base_dmamap)) {
            /* NOTE(review): the tag created above is not destroyed on
             * this early return -- confirm teardown elsewhere. */
            mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
            return (ENOMEM);
        }
        bzero(op_reply_q->q_base, op_reply_q->qsz);
        bus_dmamap_load(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap, op_reply_q->q_base, op_reply_q->qsz,
            mpi3mr_memaddr_cb, &op_reply_q->q_base_phys, BUS_DMA_NOWAIT);
        mpi3mr_dprint(sc, MPI3MR_XINFO, "Operational Reply queue ID: %d phys addr= %#016jx virt_addr: %pa size= %d\n",
            qid, (uintmax_t)op_reply_q->q_base_phys, op_reply_q->q_base, op_reply_q->qsz);

        /* Defensive re-check; allocation failure already returned above,
         * so this branch is effectively unreachable. */
        if (!op_reply_q->q_base)
        {
            retval = -1;
            printf(IOCNAME "CreateRepQ: memory alloc failed for qid %d\n",
                sc->name, qid);
            goto out;
        }
    }

    memset(&create_req, 0, sizeof(create_req));

    /* Serialize use of the shared init_cmds slot. */
    mtx_lock(&sc->init_cmds.completion.lock);
    if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
        retval = -1;
        printf(IOCNAME "CreateRepQ: Init command is in use\n",
            sc->name);
        mtx_unlock(&sc->init_cmds.completion.lock);
        goto out;
    }

    sc->init_cmds.state = MPI3MR_CMD_PENDING;
    sc->init_cmds.is_waiting = 1;
    sc->init_cmds.callback = NULL;
    create_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
    create_req.Function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
    create_req.QueueID = qid;
    create_req.Flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
    create_req.MSIxIndex = sc->irq_ctx[qid - 1].msix_index;
    create_req.BaseAddress = (U64)op_reply_q->q_base_phys;
    create_req.Size = op_reply_q->num_replies;

    init_completion(&sc->init_cmds.completion);
    retval = mpi3mr_submit_admin_cmd(sc, &create_req,
        sizeof(create_req));
    if (retval) {
        printf(IOCNAME "CreateRepQ: Admin Post failed\n",
            sc->name);
        goto out_unlock;
    }

    wait_for_completion_timeout(&sc->init_cmds.completion,
        MPI3MR_INTADMCMD_TIMEOUT);
    if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
        /* Timeout: fault the controller for snapdump and mark it
         * unrecoverable. */
        printf(IOCNAME "CreateRepQ: command timed out\n",
            sc->name);
        mpi3mr_check_rh_fault_ioc(sc,
            MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
        sc->unrecoverable = 1;
        retval = -1;
        goto out_unlock;
    }

    if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
        != MPI3_IOCSTATUS_SUCCESS ) {
        printf(IOCNAME "CreateRepQ: Failed IOCStatus(0x%04x) "
            " Loginfo(0x%08x) \n" , sc->name,
            (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
            sc->init_cmds.ioc_loginfo);
        retval = -1;
        goto out_unlock;
    }
    op_reply_q->qid = qid;
    sc->irq_ctx[qid - 1].op_reply_q = op_reply_q;

out_unlock:
    sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
    mtx_unlock(&sc->init_cmds.completion.lock);
out:
    /* On any failure, release whatever DMA resources were set up. */
    if (retval) {
        if (op_reply_q->q_base_phys != 0)
            bus_dmamap_unload(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap);
        if (op_reply_q->q_base != NULL)
            bus_dmamem_free(op_reply_q->q_base_tag, op_reply_q->q_base, op_reply_q->q_base_dmamap);
        if (op_reply_q->q_base_tag != NULL)
            bus_dma_tag_destroy(op_reply_q->q_base_tag);
        op_reply_q->q_base = NULL;
        op_reply_q->qid = 0;
    }

    return retval;
}
712
713 /**
714 * mpi3mr_create_op_req_queue - create operational request queue
715 * @sc: Adapter instance reference
716 * @req_qid: operational request queue id
717 * @reply_qid: Reply queue ID
718 *
 * Create operational request queue by issuing MPI request
720 * through admin queue.
721 *
722 * Return: 0 on success, non-zero on failure.
723 */
static int mpi3mr_create_op_req_queue(struct mpi3mr_softc *sc, U16 req_qid, U8 reply_qid)
{
    Mpi3CreateRequestQueueRequest_t create_req;
    struct mpi3mr_op_req_queue *op_req_q;
    int retval = 0;
    char q_lock_name[32];

    op_req_q = &sc->op_req_q[req_qid - 1];

    if (op_req_q->qid)
    {
        retval = -1;
        printf(IOCNAME "CreateReqQ: called for duplicate qid %d\n",
            sc->name, op_req_q->qid);
        return retval;
    }

    op_req_q->ci = 0;
    op_req_q->pi = 0;
    op_req_q->num_reqs = MPI3MR_OP_REQ_Q_QD;
    op_req_q->qsz = op_req_q->num_reqs * sc->facts.op_req_sz;
    op_req_q->reply_qid = reply_qid;

    /* Allocate queue DMA memory only on first creation; on re-creation
     * after a controller reset the previous allocation is reused. */
    if (!op_req_q->q_base) {
        snprintf(q_lock_name, 32, "Request Queue Lock[%d]", req_qid);
        mtx_init(&op_req_q->q_lock, q_lock_name, NULL, MTX_SPIN);

        if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
            4, 0,                       /* algnmnt, boundary */
            sc->dma_loaddr,             /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            op_req_q->qsz,              /* maxsize */
            1,                          /* nsegments */
            op_req_q->qsz,              /* maxsegsize */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockarg */
            &op_req_q->q_base_tag)) {
            mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
            return (ENOMEM);
        }

        if (bus_dmamem_alloc(op_req_q->q_base_tag, (void **)&op_req_q->q_base,
            BUS_DMA_NOWAIT, &op_req_q->q_base_dmamap)) {
            /* NOTE(review): the tag created above is not destroyed on
             * this early return -- confirm teardown elsewhere. */
            mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
            return (ENOMEM);
        }

        bzero(op_req_q->q_base, op_req_q->qsz);

        bus_dmamap_load(op_req_q->q_base_tag, op_req_q->q_base_dmamap, op_req_q->q_base, op_req_q->qsz,
            mpi3mr_memaddr_cb, &op_req_q->q_base_phys, BUS_DMA_NOWAIT);

        mpi3mr_dprint(sc, MPI3MR_XINFO, "Operational Request QID: %d phys addr= %#016jx virt addr= %pa size= %d associated Reply QID: %d\n",
            req_qid, (uintmax_t)op_req_q->q_base_phys, op_req_q->q_base, op_req_q->qsz, reply_qid);

        /* Defensive re-check; allocation failure already returned above,
         * so this branch is effectively unreachable. */
        if (!op_req_q->q_base) {
            retval = -1;
            printf(IOCNAME "CreateReqQ: memory alloc failed for qid %d\n",
                sc->name, req_qid);
            goto out;
        }
    }

    memset(&create_req, 0, sizeof(create_req));

    /* Serialize use of the shared init_cmds slot. */
    mtx_lock(&sc->init_cmds.completion.lock);
    if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
        retval = -1;
        printf(IOCNAME "CreateReqQ: Init command is in use\n",
            sc->name);
        mtx_unlock(&sc->init_cmds.completion.lock);
        goto out;
    }

    sc->init_cmds.state = MPI3MR_CMD_PENDING;
    sc->init_cmds.is_waiting = 1;
    sc->init_cmds.callback = NULL;
    create_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
    create_req.Function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
    create_req.QueueID = req_qid;
    create_req.Flags = 0;
    create_req.ReplyQueueID = reply_qid;
    create_req.BaseAddress = (U64)op_req_q->q_base_phys;
    create_req.Size = op_req_q->num_reqs;

    init_completion(&sc->init_cmds.completion);
    retval = mpi3mr_submit_admin_cmd(sc, &create_req,
        sizeof(create_req));
    if (retval) {
        printf(IOCNAME "CreateReqQ: Admin Post failed\n",
            sc->name);
        goto out_unlock;
    }

    wait_for_completion_timeout(&sc->init_cmds.completion,
        (MPI3MR_INTADMCMD_TIMEOUT));

    if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
        /* Timeout: fault the controller for snapdump and mark it
         * unrecoverable. */
        printf(IOCNAME "CreateReqQ: command timed out\n",
            sc->name);
        mpi3mr_check_rh_fault_ioc(sc,
            MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
        sc->unrecoverable = 1;
        retval = -1;
        goto out_unlock;
    }

    if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
        != MPI3_IOCSTATUS_SUCCESS ) {
        printf(IOCNAME "CreateReqQ: Failed IOCStatus(0x%04x) "
            " Loginfo(0x%08x) \n" , sc->name,
            (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
            sc->init_cmds.ioc_loginfo);
        retval = -1;
        goto out_unlock;
    }
    op_req_q->qid = req_qid;

out_unlock:
    sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
    mtx_unlock(&sc->init_cmds.completion.lock);
out:
    /* On any failure, release whatever DMA resources were set up. */
    if (retval) {
        if (op_req_q->q_base_phys != 0)
            bus_dmamap_unload(op_req_q->q_base_tag, op_req_q->q_base_dmamap);
        if (op_req_q->q_base != NULL)
            bus_dmamem_free(op_req_q->q_base_tag, op_req_q->q_base, op_req_q->q_base_dmamap);
        if (op_req_q->q_base_tag != NULL)
            bus_dma_tag_destroy(op_req_q->q_base_tag);
        op_req_q->q_base = NULL;
        op_req_q->qid = 0;
    }
    return retval;
}
859
860 /**
861 * mpi3mr_create_op_queues - create operational queues
862 * @sc: Adapter instance reference
863 *
 * Create operational queues (request queues and reply queues).
865 * Return: 0 on success, non-zero on failure.
866 */
mpi3mr_create_op_queues(struct mpi3mr_softc * sc)867 static int mpi3mr_create_op_queues(struct mpi3mr_softc *sc)
868 {
869 int retval = 0;
870 U16 num_queues = 0, i = 0, qid;
871
872 num_queues = min(sc->facts.max_op_reply_q,
873 sc->facts.max_op_req_q);
874 num_queues = min(num_queues, sc->msix_count);
875
876 /*
877 * During reset set the num_queues to the number of queues
878 * that was set before the reset.
879 */
880 if (sc->num_queues)
881 num_queues = sc->num_queues;
882
883 mpi3mr_dprint(sc, MPI3MR_XINFO, "Trying to create %d Operational Q pairs\n",
884 num_queues);
885
886 if (!sc->op_req_q) {
887 sc->op_req_q = malloc(sizeof(struct mpi3mr_op_req_queue) *
888 num_queues, M_MPI3MR, M_NOWAIT | M_ZERO);
889
890 if (!sc->op_req_q) {
891 mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to alloc memory for Request queue info\n");
892 retval = -1;
893 goto out_failed;
894 }
895 }
896
897 if (!sc->op_reply_q) {
898 sc->op_reply_q = malloc(sizeof(struct mpi3mr_op_reply_queue) * num_queues,
899 M_MPI3MR, M_NOWAIT | M_ZERO);
900
901 if (!sc->op_reply_q) {
902 mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to alloc memory for Reply queue info\n");
903 retval = -1;
904 goto out_failed;
905 }
906 }
907
908 sc->num_hosttag_op_req_q = (sc->max_host_ios + 1) / num_queues;
909
910 /*Operational Request and reply queue ID starts with 1*/
911 for (i = 0; i < num_queues; i++) {
912 qid = i + 1;
913 if (mpi3mr_create_op_reply_queue(sc, qid)) {
914 mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create Reply queue %d\n",
915 qid);
916 break;
917 }
918 if (mpi3mr_create_op_req_queue(sc, qid,
919 sc->op_reply_q[qid - 1].qid)) {
920 mpi3mr_delete_op_reply_queue(sc, qid);
921 mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create Request queue %d\n",
922 qid);
923 break;
924 }
925
926 }
927
928 /* Not even one queue is created successfully*/
929 if (i == 0) {
930 retval = -1;
931 goto out_failed;
932 }
933
934 if (!sc->num_queues) {
935 sc->num_queues = i;
936 } else {
937 if (num_queues != i) {
938 mpi3mr_dprint(sc, MPI3MR_ERROR, "Number of queues (%d) post reset are not same as"
939 "queues allocated (%d) during driver init\n", i, num_queues);
940 goto out_failed;
941 }
942 }
943
944 mpi3mr_dprint(sc, MPI3MR_INFO, "Successfully created %d Operational Queue pairs\n",
945 sc->num_queues);
946 mpi3mr_dprint(sc, MPI3MR_INFO, "Request Queue QD: %d Reply queue QD: %d\n",
947 sc->op_req_q[0].num_reqs, sc->op_reply_q[0].num_replies);
948
949 return retval;
950 out_failed:
951 if (sc->op_req_q) {
952 free(sc->op_req_q, M_MPI3MR);
953 sc->op_req_q = NULL;
954 }
955 if (sc->op_reply_q) {
956 free(sc->op_reply_q, M_MPI3MR);
957 sc->op_reply_q = NULL;
958 }
959 return retval;
960 }
961
962 /**
963 * mpi3mr_setup_admin_qpair - Setup admin queue pairs
964 * @sc: Adapter instance reference
965 *
966 * Allocation and setup admin queues(request queues and reply queues).
967 * Return: 0 on success, non-zero on failure.
968 */
mpi3mr_setup_admin_qpair(struct mpi3mr_softc * sc)969 static int mpi3mr_setup_admin_qpair(struct mpi3mr_softc *sc)
970 {
971 int retval = 0;
972 U32 num_adm_entries = 0;
973
974 sc->admin_req_q_sz = MPI3MR_AREQQ_SIZE;
975 sc->num_admin_reqs = sc->admin_req_q_sz / MPI3MR_AREQ_FRAME_SZ;
976 sc->admin_req_ci = sc->admin_req_pi = 0;
977
978 sc->admin_reply_q_sz = MPI3MR_AREPQ_SIZE;
979 sc->num_admin_replies = sc->admin_reply_q_sz/ MPI3MR_AREP_FRAME_SZ;
980 sc->admin_reply_ci = 0;
981 sc->admin_reply_ephase = 1;
982
983 if (!sc->admin_req) {
984 /*
985 * We need to create the tag for the admin queue to get the
986 * iofacts to see how many bits the controller decodes. Solve
987 * this chicken and egg problem by only doing lower 4GB DMA.
988 */
989 if (bus_dma_tag_create(sc->mpi3mr_parent_dmat, /* parent */
990 4, 0, /* algnmnt, boundary */
991 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
992 BUS_SPACE_MAXADDR, /* highaddr */
993 NULL, NULL, /* filter, filterarg */
994 sc->admin_req_q_sz, /* maxsize */
995 1, /* nsegments */
996 sc->admin_req_q_sz, /* maxsegsize */
997 0, /* flags */
998 NULL, NULL, /* lockfunc, lockarg */
999 &sc->admin_req_tag)) {
1000 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
1001 return (ENOMEM);
1002 }
1003
1004 if (bus_dmamem_alloc(sc->admin_req_tag, (void **)&sc->admin_req,
1005 BUS_DMA_NOWAIT, &sc->admin_req_dmamap)) {
1006 mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
1007 return (ENOMEM);
1008 }
1009 bzero(sc->admin_req, sc->admin_req_q_sz);
1010 bus_dmamap_load(sc->admin_req_tag, sc->admin_req_dmamap, sc->admin_req, sc->admin_req_q_sz,
1011 mpi3mr_memaddr_cb, &sc->admin_req_phys, BUS_DMA_NOWAIT);
1012 mpi3mr_dprint(sc, MPI3MR_XINFO, "Admin Req queue phys addr= %#016jx size= %d\n",
1013 (uintmax_t)sc->admin_req_phys, sc->admin_req_q_sz);
1014
1015 if (!sc->admin_req)
1016 {
1017 retval = -1;
1018 printf(IOCNAME "Memory alloc for AdminReqQ: failed\n",
1019 sc->name);
1020 goto out_failed;
1021 }
1022 }
1023
1024 if (!sc->admin_reply) {
1025 mtx_init(&sc->admin_reply_lock, "Admin Reply Queue Lock", NULL, MTX_SPIN);
1026
1027 if (bus_dma_tag_create(sc->mpi3mr_parent_dmat, /* parent */
1028 4, 0, /* algnmnt, boundary */
1029 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1030 BUS_SPACE_MAXADDR, /* highaddr */
1031 NULL, NULL, /* filter, filterarg */
1032 sc->admin_reply_q_sz, /* maxsize */
1033 1, /* nsegments */
1034 sc->admin_reply_q_sz, /* maxsegsize */
1035 0, /* flags */
1036 NULL, NULL, /* lockfunc, lockarg */
1037 &sc->admin_reply_tag)) {
1038 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate reply DMA tag\n");
1039 return (ENOMEM);
1040 }
1041
1042 if (bus_dmamem_alloc(sc->admin_reply_tag, (void **)&sc->admin_reply,
1043 BUS_DMA_NOWAIT, &sc->admin_reply_dmamap)) {
1044 mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
1045 return (ENOMEM);
1046 }
1047 bzero(sc->admin_reply, sc->admin_reply_q_sz);
1048 bus_dmamap_load(sc->admin_reply_tag, sc->admin_reply_dmamap, sc->admin_reply, sc->admin_reply_q_sz,
1049 mpi3mr_memaddr_cb, &sc->admin_reply_phys, BUS_DMA_NOWAIT);
1050 mpi3mr_dprint(sc, MPI3MR_XINFO, "Admin Reply queue phys addr= %#016jx size= %d\n",
1051 (uintmax_t)sc->admin_reply_phys, sc->admin_req_q_sz);
1052
1053
1054 if (!sc->admin_reply)
1055 {
1056 retval = -1;
1057 printf(IOCNAME "Memory alloc for AdminRepQ: failed\n",
1058 sc->name);
1059 goto out_failed;
1060 }
1061 }
1062
1063 num_adm_entries = (sc->num_admin_replies << 16) |
1064 (sc->num_admin_reqs);
1065 mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_OFFSET, num_adm_entries);
1066 mpi3mr_regwrite64(sc, MPI3_SYSIF_ADMIN_REQ_Q_ADDR_LOW_OFFSET, sc->admin_req_phys);
1067 mpi3mr_regwrite64(sc, MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_LOW_OFFSET, sc->admin_reply_phys);
1068 mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET, sc->admin_req_pi);
1069 mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET, sc->admin_reply_ci);
1070
1071 return retval;
1072
1073 out_failed:
1074 /* Free Admin reply*/
1075 if (sc->admin_reply_phys)
1076 bus_dmamap_unload(sc->admin_reply_tag, sc->admin_reply_dmamap);
1077
1078 if (sc->admin_reply != NULL)
1079 bus_dmamem_free(sc->admin_reply_tag, sc->admin_reply,
1080 sc->admin_reply_dmamap);
1081
1082 if (sc->admin_reply_tag != NULL)
1083 bus_dma_tag_destroy(sc->admin_reply_tag);
1084
1085 /* Free Admin request*/
1086 if (sc->admin_req_phys)
1087 bus_dmamap_unload(sc->admin_req_tag, sc->admin_req_dmamap);
1088
1089 if (sc->admin_req != NULL)
1090 bus_dmamem_free(sc->admin_req_tag, sc->admin_req,
1091 sc->admin_req_dmamap);
1092
1093 if (sc->admin_req_tag != NULL)
1094 bus_dma_tag_destroy(sc->admin_req_tag);
1095
1096 return retval;
1097 }
1098
1099 /**
1100 * mpi3mr_print_fault_info - Display fault information
1101 * @sc: Adapter instance reference
1102 *
1103 * Display the controller fault information if there is a
1104 * controller fault.
1105 *
1106 * Return: Nothing.
1107 */
mpi3mr_print_fault_info(struct mpi3mr_softc * sc)1108 static void mpi3mr_print_fault_info(struct mpi3mr_softc *sc)
1109 {
1110 U32 ioc_status, code, code1, code2, code3;
1111
1112 ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1113
1114 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1115 code = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) &
1116 MPI3_SYSIF_FAULT_CODE_MASK;
1117 code1 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO0_OFFSET);
1118 code2 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO1_OFFSET);
1119 code3 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO2_OFFSET);
1120 printf(IOCNAME "fault codes 0x%04x:0x%04x:0x%04x:0x%04x\n",
1121 sc->name, code, code1, code2, code3);
1122 }
1123 }
1124
mpi3mr_get_iocstate(struct mpi3mr_softc * sc)1125 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_softc *sc)
1126 {
1127 U32 ioc_status, ioc_control;
1128 U8 ready, enabled;
1129
1130 ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1131 ioc_control = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1132
1133 if(sc->unrecoverable)
1134 return MRIOC_STATE_UNRECOVERABLE;
1135 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
1136 return MRIOC_STATE_FAULT;
1137
1138 ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
1139 enabled = (ioc_control & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
1140
1141 if (ready && enabled)
1142 return MRIOC_STATE_READY;
1143 if ((!ready) && (!enabled))
1144 return MRIOC_STATE_RESET;
1145 if ((!ready) && (enabled))
1146 return MRIOC_STATE_BECOMING_READY;
1147
1148 return MRIOC_STATE_RESET_REQUESTED;
1149 }
1150
mpi3mr_clear_resethistory(struct mpi3mr_softc * sc)1151 static inline void mpi3mr_clear_resethistory(struct mpi3mr_softc *sc)
1152 {
1153 U32 ioc_status;
1154
1155 ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1156 if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1157 mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_STATUS_OFFSET, ioc_status);
1158
1159 }
1160
1161 /**
1162 * mpi3mr_mur_ioc - Message unit Reset handler
1163 * @sc: Adapter instance reference
1164 * @reset_reason: Reset reason code
1165 *
1166 * Issue Message unit Reset to the controller and wait for it to
1167 * be complete.
1168 *
1169 * Return: 0 on success, -1 on failure.
1170 */
mpi3mr_mur_ioc(struct mpi3mr_softc * sc,U32 reset_reason)1171 static int mpi3mr_mur_ioc(struct mpi3mr_softc *sc, U32 reset_reason)
1172 {
1173 U32 ioc_config, timeout, ioc_status;
1174 int retval = -1;
1175
1176 mpi3mr_dprint(sc, MPI3MR_INFO, "Issuing Message Unit Reset(MUR)\n");
1177 if (sc->unrecoverable) {
1178 mpi3mr_dprint(sc, MPI3MR_ERROR, "IOC is unrecoverable MUR not issued\n");
1179 return retval;
1180 }
1181 mpi3mr_clear_resethistory(sc);
1182 mpi3mr_regwrite(sc, MPI3_SYSIF_SCRATCHPAD0_OFFSET, reset_reason);
1183 ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1184 ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1185 mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
1186
1187 timeout = MPI3MR_MUR_TIMEOUT * 10;
1188 do {
1189 ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1190 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
1191 mpi3mr_clear_resethistory(sc);
1192 ioc_config =
1193 mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1194 if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1195 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1196 (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) {
1197 retval = 0;
1198 break;
1199 }
1200 }
1201 DELAY(100 * 1000);
1202 } while (--timeout);
1203
1204 ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1205 ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1206
1207 mpi3mr_dprint(sc, MPI3MR_INFO, "IOC Status/Config after %s MUR is (0x%x)/(0x%x)\n",
1208 !retval ? "successful":"failed", ioc_status, ioc_config);
1209 return retval;
1210 }
1211
1212 /**
1213 * mpi3mr_bring_ioc_ready - Bring controller to ready state
1214 * @sc: Adapter instance reference
1215 *
1216 * Set Enable IOC bit in IOC configuration register and wait for
1217 * the controller to become ready.
1218 *
1219 * Return: 0 on success, appropriate error on failure.
1220 */
mpi3mr_bring_ioc_ready(struct mpi3mr_softc * sc)1221 static int mpi3mr_bring_ioc_ready(struct mpi3mr_softc *sc)
1222 {
1223 U32 ioc_config, timeout;
1224 enum mpi3mr_iocstate current_state;
1225
1226 ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1227 ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1228 mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
1229
1230 timeout = sc->ready_timeout * 10;
1231 do {
1232 current_state = mpi3mr_get_iocstate(sc);
1233 if (current_state == MRIOC_STATE_READY)
1234 return 0;
1235 DELAY(100 * 1000);
1236 } while (--timeout);
1237
1238 return -1;
1239 }
1240
/* IOC state to name mapper structure */
static const struct {
	enum mpi3mr_iocstate value;
	char *name;
} mrioc_states[] = {
	{ MRIOC_STATE_READY, "ready" },
	{ MRIOC_STATE_FAULT, "fault" },
	{ MRIOC_STATE_RESET, "reset" },
	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
	{ MRIOC_STATE_COUNT, "Count" },
};
1252
mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)1253 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
1254 {
1255 int i;
1256 char *name = NULL;
1257
1258 for (i = 0; i < MRIOC_STATE_COUNT; i++) {
1259 if (mrioc_states[i].value == mrioc_state){
1260 name = mrioc_states[i].name;
1261 break;
1262 }
1263 }
1264 return name;
1265 }
1266
1267 /* Reset reason to name mapper structure*/
1268 static const struct {
1269 enum mpi3mr_reset_reason value;
1270 char *name;
1271 } mpi3mr_reset_reason_codes[] = {
1272 { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
1273 { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
1274 { MPI3MR_RESET_FROM_IOCTL, "application" },
1275 { MPI3MR_RESET_FROM_EH_HOS, "error handling" },
1276 { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
1277 { MPI3MR_RESET_FROM_IOCTL_TIMEOUT, "IOCTL timeout" },
1278 { MPI3MR_RESET_FROM_SCSIIO_TIMEOUT, "SCSIIO timeout" },
1279 { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
1280 { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
1281 { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
1282 { MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
1283 { MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
1284 { MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
1285 { MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
1286 {
1287 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
1288 "create request queue timeout"
1289 },
1290 {
1291 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
1292 "create reply queue timeout"
1293 },
1294 { MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
1295 { MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
1296 { MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
1297 { MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
1298 {
1299 MPI3MR_RESET_FROM_CIACTVRST_TIMER,
1300 "component image activation timeout"
1301 },
1302 {
1303 MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
1304 "get package version timeout"
1305 },
1306 {
1307 MPI3MR_RESET_FROM_PELABORT_TIMEOUT,
1308 "persistent event log abort timeout"
1309 },
1310 { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
1311 { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
1312 {
1313 MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT,
1314 "diagnostic buffer post timeout"
1315 },
1316 { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronus reset" },
1317 { MPI3MR_RESET_REASON_COUNT, "Reset reason count" },
1318 };
1319
1320 /**
1321 * mpi3mr_reset_rc_name - get reset reason code name
1322 * @reason_code: reset reason code value
1323 *
1324 * Map reset reason to an NULL terminated ASCII string
1325 *
1326 * Return: Name corresponding to reset reason value or NULL.
1327 */
mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)1328 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
1329 {
1330 int i;
1331 char *name = NULL;
1332
1333 for (i = 0; i < MPI3MR_RESET_REASON_COUNT; i++) {
1334 if (mpi3mr_reset_reason_codes[i].value == reason_code) {
1335 name = mpi3mr_reset_reason_codes[i].name;
1336 break;
1337 }
1338 }
1339 return name;
1340 }
1341
/* Number of entries (incl. sentinel) in the reset type table below */
#define MAX_RESET_TYPE 3
/* Reset type to name mapper structure*/
static const struct {
	U16 reset_type;
	char *name;
} mpi3mr_reset_types[] = {
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
	{ MAX_RESET_TYPE, "count"}
};
1352
1353 /**
1354 * mpi3mr_reset_type_name - get reset type name
1355 * @reset_type: reset type value
1356 *
1357 * Map reset type to an NULL terminated ASCII string
1358 *
1359 * Return: Name corresponding to reset type value or NULL.
1360 */
mpi3mr_reset_type_name(U16 reset_type)1361 static const char *mpi3mr_reset_type_name(U16 reset_type)
1362 {
1363 int i;
1364 char *name = NULL;
1365
1366 for (i = 0; i < MAX_RESET_TYPE; i++) {
1367 if (mpi3mr_reset_types[i].reset_type == reset_type) {
1368 name = mpi3mr_reset_types[i].name;
1369 break;
1370 }
1371 }
1372 return name;
1373 }
1374
1375 /**
1376 * mpi3mr_soft_reset_success - Check softreset is success or not
1377 * @ioc_status: IOC status register value
1378 * @ioc_config: IOC config register value
1379 *
1380 * Check whether the soft reset is successful or not based on
1381 * IOC status and IOC config register values.
1382 *
1383 * Return: True when the soft reset is success, false otherwise.
1384 */
1385 static inline bool
mpi3mr_soft_reset_success(U32 ioc_status,U32 ioc_config)1386 mpi3mr_soft_reset_success(U32 ioc_status, U32 ioc_config)
1387 {
1388 if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1389 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1390 (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1391 return true;
1392 return false;
1393 }
1394
1395 /**
1396 * mpi3mr_diagfault_success - Check diag fault is success or not
1397 * @sc: Adapter reference
1398 * @ioc_status: IOC status register value
1399 *
1400 * Check whether the controller hit diag reset fault code.
1401 *
1402 * Return: True when there is diag fault, false otherwise.
1403 */
mpi3mr_diagfault_success(struct mpi3mr_softc * sc,U32 ioc_status)1404 static inline bool mpi3mr_diagfault_success(struct mpi3mr_softc *sc,
1405 U32 ioc_status)
1406 {
1407 U32 fault;
1408
1409 if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1410 return false;
1411 fault = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) & MPI3_SYSIF_FAULT_CODE_MASK;
1412 if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET)
1413 return true;
1414 return false;
1415 }
1416
1417 /**
1418 * mpi3mr_issue_iocfacts - Send IOC Facts
1419 * @sc: Adapter instance reference
1420 * @facts_data: Cached IOC facts data
1421 *
1422 * Issue IOC Facts MPI request through admin queue and wait for
1423 * the completion of it or time out.
1424 *
1425 * Return: 0 on success, non-zero on failures.
1426 */
mpi3mr_issue_iocfacts(struct mpi3mr_softc * sc,Mpi3IOCFactsData_t * facts_data)1427 static int mpi3mr_issue_iocfacts(struct mpi3mr_softc *sc,
1428 Mpi3IOCFactsData_t *facts_data)
1429 {
1430 Mpi3IOCFactsRequest_t iocfacts_req;
1431 bus_dma_tag_t data_tag = NULL;
1432 bus_dmamap_t data_map = NULL;
1433 bus_addr_t data_phys = 0;
1434 void *data = NULL;
1435 U32 data_len = sizeof(*facts_data);
1436 int retval = 0;
1437
1438 U8 sgl_flags = (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
1439 MPI3_SGE_FLAGS_DLAS_SYSTEM |
1440 MPI3_SGE_FLAGS_END_OF_LIST);
1441
1442
1443 /*
1444 * We can't use sc->dma_loaddr here. We set those only after we get the
1445 * iocfacts. So allocate in the lower 4GB. The amount of data is tiny
1446 * and we don't do this that often, so any bouncing we might have to do
1447 * isn't a cause for concern.
1448 */
1449 if (bus_dma_tag_create(sc->mpi3mr_parent_dmat, /* parent */
1450 4, 0, /* algnmnt, boundary */
1451 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1452 BUS_SPACE_MAXADDR, /* highaddr */
1453 NULL, NULL, /* filter, filterarg */
1454 data_len, /* maxsize */
1455 1, /* nsegments */
1456 data_len, /* maxsegsize */
1457 0, /* flags */
1458 NULL, NULL, /* lockfunc, lockarg */
1459 &data_tag)) {
1460 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
1461 return (ENOMEM);
1462 }
1463
1464 if (bus_dmamem_alloc(data_tag, (void **)&data,
1465 BUS_DMA_NOWAIT, &data_map)) {
1466 mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d Data DMA mem alloc failed\n",
1467 __func__, __LINE__);
1468 return (ENOMEM);
1469 }
1470
1471 bzero(data, data_len);
1472 bus_dmamap_load(data_tag, data_map, data, data_len,
1473 mpi3mr_memaddr_cb, &data_phys, BUS_DMA_NOWAIT);
1474 mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d IOCfacts data phys addr= %#016jx size= %d\n",
1475 __func__, __LINE__, (uintmax_t)data_phys, data_len);
1476
1477 if (!data)
1478 {
1479 retval = -1;
1480 printf(IOCNAME "Memory alloc for IOCFactsData: failed\n",
1481 sc->name);
1482 goto out;
1483 }
1484
1485 mtx_lock(&sc->init_cmds.completion.lock);
1486 memset(&iocfacts_req, 0, sizeof(iocfacts_req));
1487
1488 if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
1489 retval = -1;
1490 printf(IOCNAME "Issue IOCFacts: Init command is in use\n",
1491 sc->name);
1492 mtx_unlock(&sc->init_cmds.completion.lock);
1493 goto out;
1494 }
1495
1496 sc->init_cmds.state = MPI3MR_CMD_PENDING;
1497 sc->init_cmds.is_waiting = 1;
1498 sc->init_cmds.callback = NULL;
1499 iocfacts_req.HostTag = (MPI3MR_HOSTTAG_INITCMDS);
1500 iocfacts_req.Function = MPI3_FUNCTION_IOC_FACTS;
1501
1502 mpi3mr_add_sg_single(&iocfacts_req.SGL, sgl_flags, data_len,
1503 data_phys);
1504
1505 init_completion(&sc->init_cmds.completion);
1506
1507 retval = mpi3mr_submit_admin_cmd(sc, &iocfacts_req,
1508 sizeof(iocfacts_req));
1509
1510 if (retval) {
1511 printf(IOCNAME "Issue IOCFacts: Admin Post failed\n",
1512 sc->name);
1513 goto out_unlock;
1514 }
1515
1516 wait_for_completion_timeout(&sc->init_cmds.completion,
1517 (MPI3MR_INTADMCMD_TIMEOUT));
1518 if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1519 printf(IOCNAME "Issue IOCFacts: command timed out\n",
1520 sc->name);
1521 mpi3mr_check_rh_fault_ioc(sc,
1522 MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
1523 sc->unrecoverable = 1;
1524 retval = -1;
1525 goto out_unlock;
1526 }
1527
1528 if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1529 != MPI3_IOCSTATUS_SUCCESS ) {
1530 printf(IOCNAME "Issue IOCFacts: Failed IOCStatus(0x%04x) "
1531 " Loginfo(0x%08x) \n" , sc->name,
1532 (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1533 sc->init_cmds.ioc_loginfo);
1534 retval = -1;
1535 goto out_unlock;
1536 }
1537
1538 memcpy(facts_data, (U8 *)data, data_len);
1539 out_unlock:
1540 sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1541 mtx_unlock(&sc->init_cmds.completion.lock);
1542
1543 out:
1544 if (data_phys != 0)
1545 bus_dmamap_unload(data_tag, data_map);
1546 if (data != NULL)
1547 bus_dmamem_free(data_tag, data, data_map);
1548 if (data_tag != NULL)
1549 bus_dma_tag_destroy(data_tag);
1550 return retval;
1551 }
1552
1553 /**
1554 * mpi3mr_process_factsdata - Process IOC facts data
1555 * @sc: Adapter instance reference
1556 * @facts_data: Cached IOC facts data
1557 *
1558 * Convert IOC facts data into cpu endianness and cache it in
1559 * the driver .
1560 *
1561 * Return: Nothing.
1562 */
mpi3mr_process_factsdata(struct mpi3mr_softc * sc,Mpi3IOCFactsData_t * facts_data)1563 static int mpi3mr_process_factsdata(struct mpi3mr_softc *sc,
1564 Mpi3IOCFactsData_t *facts_data)
1565 {
1566 int retval = 0;
1567 U32 ioc_config, req_sz, facts_flags;
1568 struct mpi3mr_compimg_ver *fwver;
1569
1570 if (le16toh(facts_data->IOCFactsDataLength) !=
1571 (sizeof(*facts_data) / 4)) {
1572 mpi3mr_dprint(sc, MPI3MR_INFO, "IOCFacts data length mismatch "
1573 " driver_sz(%ld) firmware_sz(%d) \n",
1574 sizeof(*facts_data),
1575 facts_data->IOCFactsDataLength);
1576 }
1577
1578 ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1579 req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
1580 MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
1581
1582 if (facts_data->IOCRequestFrameSize != (req_sz/4)) {
1583 mpi3mr_dprint(sc, MPI3MR_INFO, "IOCFacts data reqFrameSize mismatch "
1584 " hw_size(%d) firmware_sz(%d) \n" , req_sz/4,
1585 facts_data->IOCRequestFrameSize);
1586 }
1587
1588 memset(&sc->facts, 0, sizeof(sc->facts));
1589
1590 facts_flags = le32toh(facts_data->Flags);
1591 sc->facts.op_req_sz = req_sz;
1592 sc->op_reply_sz = 1 << ((ioc_config &
1593 MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
1594 MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
1595
1596 sc->facts.ioc_num = facts_data->IOCNumber;
1597 sc->facts.who_init = facts_data->WhoInit;
1598 sc->facts.max_msix_vectors = facts_data->MaxMSIxVectors;
1599 sc->facts.personality = (facts_flags &
1600 MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
1601 sc->facts.dma_mask = (facts_flags &
1602 MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
1603 MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
1604 sc->facts.protocol_flags = facts_data->ProtocolFlags;
1605 sc->facts.mpi_version = (facts_data->MPIVersion.Word);
1606 sc->facts.max_reqs = (facts_data->MaxOutstandingRequests);
1607 sc->facts.product_id = (facts_data->ProductID);
1608 sc->facts.reply_sz = (facts_data->ReplyFrameSize) * 4;
1609 sc->facts.exceptions = (facts_data->IOCExceptions);
1610 sc->facts.max_perids = (facts_data->MaxPersistentID);
1611 sc->facts.max_vds = (facts_data->MaxVDs);
1612 sc->facts.max_hpds = (facts_data->MaxHostPDs);
1613 sc->facts.max_advhpds = (facts_data->MaxAdvHostPDs);
1614 sc->facts.max_raidpds = (facts_data->MaxRAIDPDs);
1615 sc->facts.max_nvme = (facts_data->MaxNVMe);
1616 sc->facts.max_pcieswitches =
1617 (facts_data->MaxPCIeSwitches);
1618 sc->facts.max_sasexpanders =
1619 (facts_data->MaxSASExpanders);
1620 sc->facts.max_sasinitiators =
1621 (facts_data->MaxSASInitiators);
1622 sc->facts.max_enclosures = (facts_data->MaxEnclosures);
1623 sc->facts.min_devhandle = (facts_data->MinDevHandle);
1624 sc->facts.max_devhandle = (facts_data->MaxDevHandle);
1625 sc->facts.max_op_req_q =
1626 (facts_data->MaxOperationalRequestQueues);
1627 sc->facts.max_op_reply_q =
1628 (facts_data->MaxOperationalReplyQueues);
1629 sc->facts.ioc_capabilities =
1630 (facts_data->IOCCapabilities);
1631 sc->facts.fw_ver.build_num =
1632 (facts_data->FWVersion.BuildNum);
1633 sc->facts.fw_ver.cust_id =
1634 (facts_data->FWVersion.CustomerID);
1635 sc->facts.fw_ver.ph_minor = facts_data->FWVersion.PhaseMinor;
1636 sc->facts.fw_ver.ph_major = facts_data->FWVersion.PhaseMajor;
1637 sc->facts.fw_ver.gen_minor = facts_data->FWVersion.GenMinor;
1638 sc->facts.fw_ver.gen_major = facts_data->FWVersion.GenMajor;
1639 sc->max_msix_vectors = min(sc->max_msix_vectors,
1640 sc->facts.max_msix_vectors);
1641 sc->facts.sge_mod_mask = facts_data->SGEModifierMask;
1642 sc->facts.sge_mod_value = facts_data->SGEModifierValue;
1643 sc->facts.sge_mod_shift = facts_data->SGEModifierShift;
1644 sc->facts.shutdown_timeout =
1645 (facts_data->ShutdownTimeout);
1646 sc->facts.max_dev_per_tg = facts_data->MaxDevicesPerThrottleGroup;
1647 sc->facts.io_throttle_data_length =
1648 facts_data->IOThrottleDataLength;
1649 sc->facts.max_io_throttle_group =
1650 facts_data->MaxIOThrottleGroup;
1651 sc->facts.io_throttle_low = facts_data->IOThrottleLow;
1652 sc->facts.io_throttle_high = facts_data->IOThrottleHigh;
1653
1654 /*Store in 512b block count*/
1655 if (sc->facts.io_throttle_data_length)
1656 sc->io_throttle_data_length =
1657 (sc->facts.io_throttle_data_length * 2 * 4);
1658 else
1659 /* set the length to 1MB + 1K to disable throttle*/
1660 sc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;
1661
1662 sc->io_throttle_high = (sc->facts.io_throttle_high * 2 * 1024);
1663 sc->io_throttle_low = (sc->facts.io_throttle_low * 2 * 1024);
1664
1665 fwver = &sc->facts.fw_ver;
1666 snprintf(sc->fw_version, sizeof(sc->fw_version),
1667 "%d.%d.%d.%d.%05d-%05d",
1668 fwver->gen_major, fwver->gen_minor, fwver->ph_major,
1669 fwver->ph_minor, fwver->cust_id, fwver->build_num);
1670
1671 mpi3mr_dprint(sc, MPI3MR_INFO, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),"
1672 "maxreqs(%d), mindh(%d) maxPDs(%d) maxvectors(%d) maxperids(%d)\n",
1673 sc->facts.ioc_num, sc->facts.max_op_req_q,
1674 sc->facts.max_op_reply_q, sc->facts.max_devhandle,
1675 sc->facts.max_reqs, sc->facts.min_devhandle,
1676 sc->facts.max_pds, sc->facts.max_msix_vectors,
1677 sc->facts.max_perids);
1678 mpi3mr_dprint(sc, MPI3MR_INFO, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x\n",
1679 sc->facts.sge_mod_mask, sc->facts.sge_mod_value,
1680 sc->facts.sge_mod_shift);
1681 mpi3mr_dprint(sc, MPI3MR_INFO,
1682 "max_dev_per_throttle_group(%d), max_throttle_groups(%d), io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
1683 sc->facts.max_dev_per_tg, sc->facts.max_io_throttle_group,
1684 sc->facts.io_throttle_data_length * 4,
1685 sc->facts.io_throttle_high, sc->facts.io_throttle_low);
1686
1687 sc->max_host_ios = sc->facts.max_reqs -
1688 (MPI3MR_INTERNALCMDS_RESVD + 1);
1689
1690 /*
1691 * Set the DMA mask for the card. dma_mask is the number of bits that
1692 * can have bits set in them. Translate this into bus_dma loaddr args.
1693 * Add sanity for more bits than address space or other overflow
1694 * situations.
1695 */
1696 if (sc->facts.dma_mask == 0 ||
1697 (sc->facts.dma_mask >= sizeof(bus_addr_t) * 8))
1698 sc->dma_loaddr = BUS_SPACE_MAXADDR;
1699 else
1700 sc->dma_loaddr = ~((1ull << sc->facts.dma_mask) - 1);
1701 mpi3mr_dprint(sc, MPI3MR_INFO,
1702 "dma_mask bits: %d loaddr 0x%jx\n",
1703 sc->facts.dma_mask, sc->dma_loaddr);
1704
1705 return retval;
1706 }
1707
mpi3mr_setup_reply_free_queues(struct mpi3mr_softc * sc)1708 static inline void mpi3mr_setup_reply_free_queues(struct mpi3mr_softc *sc)
1709 {
1710 int i;
1711 bus_addr_t phys_addr;
1712
1713 /* initialize Reply buffer Queue */
1714 for (i = 0, phys_addr = sc->reply_buf_phys;
1715 i < sc->num_reply_bufs; i++, phys_addr += sc->reply_sz)
1716 sc->reply_free_q[i] = phys_addr;
1717 sc->reply_free_q[i] = (0);
1718
1719 /* initialize Sense Buffer Queue */
1720 for (i = 0, phys_addr = sc->sense_buf_phys;
1721 i < sc->num_sense_bufs; i++, phys_addr += MPI3MR_SENSEBUF_SZ)
1722 sc->sense_buf_q[i] = phys_addr;
1723 sc->sense_buf_q[i] = (0);
1724
1725 }
1726
/*
 * Allocate and DMA-map the four reply-path memory pools: the reply
 * buffer pool, the reply free queue, the sense buffer pool and the
 * sense buffer queue. All tags/maps are stored in @sc (and are
 * presumably released by the driver's teardown path — note the
 * early ENOMEM returns below rely on that for cleanup).
 *
 * Return: 0 on success, ENOMEM on allocation failure.
 */
static int mpi3mr_reply_dma_alloc(struct mpi3mr_softc *sc)
{
	U32 sz;

	/* Queues hold one extra slot beyond the buffer count */
	sc->num_reply_bufs = sc->facts.max_reqs + MPI3MR_NUM_EVTREPLIES;
	sc->reply_free_q_sz = sc->num_reply_bufs + 1;
	sc->num_sense_bufs = sc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
	sc->sense_buf_q_sz = sc->num_sense_bufs + 1;

	/* Reply buffer pool, 16 byte align */
	sz = sc->num_reply_bufs * sc->reply_sz;

	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
				16, 0,			/* algnmnt, boundary */
				sc->dma_loaddr,		/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sz,			/* maxsize */
				1,			/* nsegments */
				sz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->reply_buf_tag)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
		return (ENOMEM);
	}

	if (bus_dmamem_alloc(sc->reply_buf_tag, (void **)&sc->reply_buf,
	    BUS_DMA_NOWAIT, &sc->reply_buf_dmamap)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d DMA mem alloc failed\n",
			__func__, __LINE__);
		return (ENOMEM);
	}

	bzero(sc->reply_buf, sz);
	bus_dmamap_load(sc->reply_buf_tag, sc->reply_buf_dmamap, sc->reply_buf, sz,
	    mpi3mr_memaddr_cb, &sc->reply_buf_phys, BUS_DMA_NOWAIT);

	/* Cache the pool's DMA range for reply-address validation */
	sc->reply_buf_dma_min_address = sc->reply_buf_phys;
	sc->reply_buf_dma_max_address = sc->reply_buf_phys + sz;
	mpi3mr_dprint(sc, MPI3MR_XINFO, "reply buf (0x%p): depth(%d), frame_size(%d), "
	    "pool_size(%d kB), reply_buf_dma(0x%llx)\n",
	    sc->reply_buf, sc->num_reply_bufs, sc->reply_sz,
	    (sz / 1024), (unsigned long long)sc->reply_buf_phys);

	/* reply free queue, 8 byte align */
	sz = sc->reply_free_q_sz * 8;

	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
				8, 0,			/* algnmnt, boundary */
				sc->dma_loaddr,		/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sz,			/* maxsize */
				1,			/* nsegments */
				sz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->reply_free_q_tag)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate reply free queue DMA tag\n");
		return (ENOMEM);
	}

	if (bus_dmamem_alloc(sc->reply_free_q_tag, (void **)&sc->reply_free_q,
	    BUS_DMA_NOWAIT, &sc->reply_free_q_dmamap)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d DMA mem alloc failed\n",
			__func__, __LINE__);
		return (ENOMEM);
	}

	bzero(sc->reply_free_q, sz);
	bus_dmamap_load(sc->reply_free_q_tag, sc->reply_free_q_dmamap, sc->reply_free_q, sz,
	    mpi3mr_memaddr_cb, &sc->reply_free_q_phys, BUS_DMA_NOWAIT);

	mpi3mr_dprint(sc, MPI3MR_XINFO, "reply_free_q (0x%p): depth(%d), frame_size(%d), "
	    "pool_size(%d kB), reply_free_q_dma(0x%llx)\n",
	    sc->reply_free_q, sc->reply_free_q_sz, 8, (sz / 1024),
	    (unsigned long long)sc->reply_free_q_phys);

	/* sense buffer pool, 4 byte align */
	sz = sc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;

	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
				4, 0,			/* algnmnt, boundary */
				sc->dma_loaddr,		/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sz,			/* maxsize */
				1,			/* nsegments */
				sz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->sense_buf_tag)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Sense buffer DMA tag\n");
		return (ENOMEM);
	}

	if (bus_dmamem_alloc(sc->sense_buf_tag, (void **)&sc->sense_buf,
	    BUS_DMA_NOWAIT, &sc->sense_buf_dmamap)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d DMA mem alloc failed\n",
			__func__, __LINE__);
		return (ENOMEM);
	}

	bzero(sc->sense_buf, sz);
	bus_dmamap_load(sc->sense_buf_tag, sc->sense_buf_dmamap, sc->sense_buf, sz,
	    mpi3mr_memaddr_cb, &sc->sense_buf_phys, BUS_DMA_NOWAIT);

	mpi3mr_dprint(sc, MPI3MR_XINFO, "sense_buf (0x%p): depth(%d), frame_size(%d), "
	    "pool_size(%d kB), sense_dma(0x%llx)\n",
	    sc->sense_buf, sc->num_sense_bufs, MPI3MR_SENSEBUF_SZ,
	    (sz / 1024), (unsigned long long)sc->sense_buf_phys);

	/* sense buffer queue, 8 byte align */
	sz = sc->sense_buf_q_sz * 8;

	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
				8, 0,			/* algnmnt, boundary */
				sc->dma_loaddr,		/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sz,			/* maxsize */
				1,			/* nsegments */
				sz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->sense_buf_q_tag)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Sense buffer Queue DMA tag\n");
		return (ENOMEM);
	}

	if (bus_dmamem_alloc(sc->sense_buf_q_tag, (void **)&sc->sense_buf_q,
	    BUS_DMA_NOWAIT, &sc->sense_buf_q_dmamap)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d DMA mem alloc failed\n",
			__func__, __LINE__);
		return (ENOMEM);
	}

	bzero(sc->sense_buf_q, sz);
	bus_dmamap_load(sc->sense_buf_q_tag, sc->sense_buf_q_dmamap, sc->sense_buf_q, sz,
	    mpi3mr_memaddr_cb, &sc->sense_buf_q_phys, BUS_DMA_NOWAIT);

	mpi3mr_dprint(sc, MPI3MR_XINFO, "sense_buf_q (0x%p): depth(%d), frame_size(%d), "
	    "pool_size(%d kB), sense_dma(0x%llx)\n",
	    sc->sense_buf_q, sc->sense_buf_q_sz, 8, (sz / 1024),
	    (unsigned long long)sc->sense_buf_q_phys);

	return 0;
}
1875
mpi3mr_reply_alloc(struct mpi3mr_softc * sc)1876 static int mpi3mr_reply_alloc(struct mpi3mr_softc *sc)
1877 {
1878 int retval = 0;
1879 U32 i;
1880
1881 if (sc->init_cmds.reply)
1882 goto post_reply_sbuf;
1883
1884 sc->init_cmds.reply = malloc(sc->reply_sz,
1885 M_MPI3MR, M_NOWAIT | M_ZERO);
1886
1887 if (!sc->init_cmds.reply) {
1888 printf(IOCNAME "Cannot allocate memory for init_cmds.reply\n",
1889 sc->name);
1890 goto out_failed;
1891 }
1892
1893 sc->ioctl_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
1894 if (!sc->ioctl_cmds.reply) {
1895 printf(IOCNAME "Cannot allocate memory for ioctl_cmds.reply\n",
1896 sc->name);
1897 goto out_failed;
1898 }
1899
1900 sc->host_tm_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
1901 if (!sc->host_tm_cmds.reply) {
1902 printf(IOCNAME "Cannot allocate memory for host_tm.reply\n",
1903 sc->name);
1904 goto out_failed;
1905 }
1906 for (i=0; i<MPI3MR_NUM_DEVRMCMD; i++) {
1907 sc->dev_rmhs_cmds[i].reply = malloc(sc->reply_sz,
1908 M_MPI3MR, M_NOWAIT | M_ZERO);
1909 if (!sc->dev_rmhs_cmds[i].reply) {
1910 printf(IOCNAME "Cannot allocate memory for"
1911 " dev_rmhs_cmd[%d].reply\n",
1912 sc->name, i);
1913 goto out_failed;
1914 }
1915 }
1916
1917 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
1918 sc->evtack_cmds[i].reply = malloc(sc->reply_sz,
1919 M_MPI3MR, M_NOWAIT | M_ZERO);
1920 if (!sc->evtack_cmds[i].reply)
1921 goto out_failed;
1922 }
1923
1924 sc->dev_handle_bitmap_sz = MPI3MR_DIV_ROUND_UP(sc->facts.max_devhandle, 8);
1925
1926 sc->removepend_bitmap = malloc(sc->dev_handle_bitmap_sz,
1927 M_MPI3MR, M_NOWAIT | M_ZERO);
1928 if (!sc->removepend_bitmap) {
1929 printf(IOCNAME "Cannot alloc memory for remove pend bitmap\n",
1930 sc->name);
1931 goto out_failed;
1932 }
1933
1934 sc->devrem_bitmap_sz = MPI3MR_DIV_ROUND_UP(MPI3MR_NUM_DEVRMCMD, 8);
1935 sc->devrem_bitmap = malloc(sc->devrem_bitmap_sz,
1936 M_MPI3MR, M_NOWAIT | M_ZERO);
1937 if (!sc->devrem_bitmap) {
1938 printf(IOCNAME "Cannot alloc memory for dev remove bitmap\n",
1939 sc->name);
1940 goto out_failed;
1941 }
1942
1943 sc->evtack_cmds_bitmap_sz = MPI3MR_DIV_ROUND_UP(MPI3MR_NUM_EVTACKCMD, 8);
1944
1945 sc->evtack_cmds_bitmap = malloc(sc->evtack_cmds_bitmap_sz,
1946 M_MPI3MR, M_NOWAIT | M_ZERO);
1947 if (!sc->evtack_cmds_bitmap)
1948 goto out_failed;
1949
1950 if (mpi3mr_reply_dma_alloc(sc)) {
1951 printf(IOCNAME "func:%s line:%d DMA memory allocation failed\n",
1952 sc->name, __func__, __LINE__);
1953 goto out_failed;
1954 }
1955
1956 post_reply_sbuf:
1957 mpi3mr_setup_reply_free_queues(sc);
1958 return retval;
1959 out_failed:
1960 mpi3mr_cleanup_interrupts(sc);
1961 mpi3mr_free_mem(sc);
1962 retval = -1;
1963 return retval;
1964 }
1965
1966 static void
mpi3mr_print_fw_pkg_ver(struct mpi3mr_softc * sc)1967 mpi3mr_print_fw_pkg_ver(struct mpi3mr_softc *sc)
1968 {
1969 int retval = 0;
1970 void *fw_pkg_ver = NULL;
1971 bus_dma_tag_t fw_pkg_ver_tag;
1972 bus_dmamap_t fw_pkg_ver_map;
1973 bus_addr_t fw_pkg_ver_dma;
1974 Mpi3CIUploadRequest_t ci_upload;
1975 Mpi3ComponentImageHeader_t *ci_header;
1976 U32 fw_pkg_ver_len = sizeof(*ci_header);
1977 U8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
1978
1979 if (bus_dma_tag_create(sc->mpi3mr_parent_dmat, /* parent */
1980 4, 0, /* algnmnt, boundary */
1981 sc->dma_loaddr, /* lowaddr */
1982 BUS_SPACE_MAXADDR, /* highaddr */
1983 NULL, NULL, /* filter, filterarg */
1984 fw_pkg_ver_len, /* maxsize */
1985 1, /* nsegments */
1986 fw_pkg_ver_len, /* maxsegsize */
1987 0, /* flags */
1988 NULL, NULL, /* lockfunc, lockarg */
1989 &fw_pkg_ver_tag)) {
1990 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate fw package version request DMA tag\n");
1991 return;
1992 }
1993
1994 if (bus_dmamem_alloc(fw_pkg_ver_tag, (void **)&fw_pkg_ver, BUS_DMA_NOWAIT, &fw_pkg_ver_map)) {
1995 mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d fw package version DMA mem alloc failed\n",
1996 __func__, __LINE__);
1997 return;
1998 }
1999
2000 bzero(fw_pkg_ver, fw_pkg_ver_len);
2001
2002 bus_dmamap_load(fw_pkg_ver_tag, fw_pkg_ver_map, fw_pkg_ver, fw_pkg_ver_len,
2003 mpi3mr_memaddr_cb, &fw_pkg_ver_dma, BUS_DMA_NOWAIT);
2004
2005 mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d fw package version phys addr= %#016jx size= %d\n",
2006 __func__, __LINE__, (uintmax_t)fw_pkg_ver_dma, fw_pkg_ver_len);
2007
2008 if (!fw_pkg_ver) {
2009 mpi3mr_dprint(sc, MPI3MR_ERROR, "Memory alloc for fw package version failed\n");
2010 goto out;
2011 }
2012
2013 memset(&ci_upload, 0, sizeof(ci_upload));
2014 mtx_lock(&sc->init_cmds.completion.lock);
2015 if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
2016 mpi3mr_dprint(sc, MPI3MR_INFO,"Issue CI Header Upload: command is in use\n");
2017 mtx_unlock(&sc->init_cmds.completion.lock);
2018 goto out;
2019 }
2020 sc->init_cmds.state = MPI3MR_CMD_PENDING;
2021 sc->init_cmds.is_waiting = 1;
2022 sc->init_cmds.callback = NULL;
2023 ci_upload.HostTag = htole16(MPI3MR_HOSTTAG_INITCMDS);
2024 ci_upload.Function = MPI3_FUNCTION_CI_UPLOAD;
2025 ci_upload.MsgFlags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
2026 ci_upload.ImageOffset = MPI3_IMAGE_HEADER_SIGNATURE0_OFFSET;
2027 ci_upload.SegmentSize = MPI3_IMAGE_HEADER_SIZE;
2028
2029 mpi3mr_add_sg_single(&ci_upload.SGL, sgl_flags, fw_pkg_ver_len,
2030 fw_pkg_ver_dma);
2031
2032 init_completion(&sc->init_cmds.completion);
2033 if ((retval = mpi3mr_submit_admin_cmd(sc, &ci_upload, sizeof(ci_upload)))) {
2034 mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue CI Header Upload: Admin Post failed\n");
2035 goto out_unlock;
2036 }
2037 wait_for_completion_timeout(&sc->init_cmds.completion,
2038 (MPI3MR_INTADMCMD_TIMEOUT));
2039 if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2040 mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue CI Header Upload: command timed out\n");
2041 sc->init_cmds.is_waiting = 0;
2042 if (!(sc->init_cmds.state & MPI3MR_CMD_RESET))
2043 mpi3mr_check_rh_fault_ioc(sc,
2044 MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
2045 goto out_unlock;
2046 }
2047 if ((GET_IOC_STATUS(sc->init_cmds.ioc_status)) != MPI3_IOCSTATUS_SUCCESS) {
2048 mpi3mr_dprint(sc, MPI3MR_ERROR,
2049 "Issue CI Header Upload: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n",
2050 GET_IOC_STATUS(sc->init_cmds.ioc_status), sc->init_cmds.ioc_loginfo);
2051 goto out_unlock;
2052 }
2053
2054 ci_header = (Mpi3ComponentImageHeader_t *) fw_pkg_ver;
2055 mpi3mr_dprint(sc, MPI3MR_XINFO,
2056 "Issue CI Header Upload:EnvVariableOffset(0x%x) \
2057 HeaderSize(0x%x) Signature1(0x%x)\n",
2058 ci_header->EnvironmentVariableOffset,
2059 ci_header->HeaderSize,
2060 ci_header->Signature1);
2061 mpi3mr_dprint(sc, MPI3MR_INFO, "FW Package Version: %02d.%02d.%02d.%02d\n",
2062 ci_header->ComponentImageVersion.GenMajor,
2063 ci_header->ComponentImageVersion.GenMinor,
2064 ci_header->ComponentImageVersion.PhaseMajor,
2065 ci_header->ComponentImageVersion.PhaseMinor);
2066 out_unlock:
2067 sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2068 mtx_unlock(&sc->init_cmds.completion.lock);
2069
2070 out:
2071 if (fw_pkg_ver_dma != 0)
2072 bus_dmamap_unload(fw_pkg_ver_tag, fw_pkg_ver_map);
2073 if (fw_pkg_ver)
2074 bus_dmamem_free(fw_pkg_ver_tag, fw_pkg_ver, fw_pkg_ver_map);
2075 if (fw_pkg_ver_tag)
2076 bus_dma_tag_destroy(fw_pkg_ver_tag);
2077
2078 }
2079
2080 /**
2081 * mpi3mr_issue_iocinit - Send IOC Init
2082 * @sc: Adapter instance reference
2083 *
2084 * Issue IOC Init MPI request through admin queue and wait for
2085 * the completion of it or time out.
2086 *
2087 * Return: 0 on success, non-zero on failures.
2088 */
mpi3mr_issue_iocinit(struct mpi3mr_softc * sc)2089 static int mpi3mr_issue_iocinit(struct mpi3mr_softc *sc)
2090 {
2091 Mpi3IOCInitRequest_t iocinit_req;
2092 Mpi3DriverInfoLayout_t *drvr_info = NULL;
2093 bus_dma_tag_t drvr_info_tag;
2094 bus_dmamap_t drvr_info_map;
2095 bus_addr_t drvr_info_phys;
2096 U32 drvr_info_len = sizeof(*drvr_info);
2097 int retval = 0;
2098 struct timeval now;
2099 uint64_t time_in_msec;
2100
2101 if (bus_dma_tag_create(sc->mpi3mr_parent_dmat, /* parent */
2102 4, 0, /* algnmnt, boundary */
2103 sc->dma_loaddr, /* lowaddr */
2104 BUS_SPACE_MAXADDR, /* highaddr */
2105 NULL, NULL, /* filter, filterarg */
2106 drvr_info_len, /* maxsize */
2107 1, /* nsegments */
2108 drvr_info_len, /* maxsegsize */
2109 0, /* flags */
2110 NULL, NULL, /* lockfunc, lockarg */
2111 &drvr_info_tag)) {
2112 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
2113 return (ENOMEM);
2114 }
2115
2116 if (bus_dmamem_alloc(drvr_info_tag, (void **)&drvr_info,
2117 BUS_DMA_NOWAIT, &drvr_info_map)) {
2118 mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d Data DMA mem alloc failed\n",
2119 __func__, __LINE__);
2120 return (ENOMEM);
2121 }
2122
2123 bzero(drvr_info, drvr_info_len);
2124 bus_dmamap_load(drvr_info_tag, drvr_info_map, drvr_info, drvr_info_len,
2125 mpi3mr_memaddr_cb, &drvr_info_phys, BUS_DMA_NOWAIT);
2126 mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d IOCfacts drvr_info phys addr= %#016jx size= %d\n",
2127 __func__, __LINE__, (uintmax_t)drvr_info_phys, drvr_info_len);
2128
2129 if (!drvr_info)
2130 {
2131 retval = -1;
2132 printf(IOCNAME "Memory alloc for Driver Info failed\n",
2133 sc->name);
2134 goto out;
2135 }
2136 drvr_info->InformationLength = (drvr_info_len);
2137 strcpy(drvr_info->DriverSignature, "Broadcom");
2138 strcpy(drvr_info->OsName, "FreeBSD");
2139 strcpy(drvr_info->OsVersion, fmt_os_ver);
2140 strcpy(drvr_info->DriverName, MPI3MR_DRIVER_NAME);
2141 strcpy(drvr_info->DriverVersion, MPI3MR_DRIVER_VERSION);
2142 strcpy(drvr_info->DriverReleaseDate, MPI3MR_DRIVER_RELDATE);
2143 drvr_info->DriverCapabilities = 0;
2144 memcpy((U8 *)&sc->driver_info, (U8 *)drvr_info, sizeof(sc->driver_info));
2145
2146 memset(&iocinit_req, 0, sizeof(iocinit_req));
2147 mtx_lock(&sc->init_cmds.completion.lock);
2148 if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
2149 retval = -1;
2150 printf(IOCNAME "Issue IOCInit: Init command is in use\n",
2151 sc->name);
2152 mtx_unlock(&sc->init_cmds.completion.lock);
2153 goto out;
2154 }
2155 sc->init_cmds.state = MPI3MR_CMD_PENDING;
2156 sc->init_cmds.is_waiting = 1;
2157 sc->init_cmds.callback = NULL;
2158 iocinit_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
2159 iocinit_req.Function = MPI3_FUNCTION_IOC_INIT;
2160 iocinit_req.MPIVersion.Struct.Dev = MPI3_VERSION_DEV;
2161 iocinit_req.MPIVersion.Struct.Unit = MPI3_VERSION_UNIT;
2162 iocinit_req.MPIVersion.Struct.Major = MPI3_VERSION_MAJOR;
2163 iocinit_req.MPIVersion.Struct.Minor = MPI3_VERSION_MINOR;
2164 iocinit_req.WhoInit = MPI3_WHOINIT_HOST_DRIVER;
2165 iocinit_req.ReplyFreeQueueDepth = sc->reply_free_q_sz;
2166 iocinit_req.ReplyFreeQueueAddress =
2167 sc->reply_free_q_phys;
2168 iocinit_req.SenseBufferLength = MPI3MR_SENSEBUF_SZ;
2169 iocinit_req.SenseBufferFreeQueueDepth =
2170 sc->sense_buf_q_sz;
2171 iocinit_req.SenseBufferFreeQueueAddress =
2172 sc->sense_buf_q_phys;
2173 iocinit_req.DriverInformationAddress = drvr_info_phys;
2174
2175 getmicrotime(&now);
2176 time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
2177 iocinit_req.TimeStamp = htole64(time_in_msec);
2178
2179 init_completion(&sc->init_cmds.completion);
2180 retval = mpi3mr_submit_admin_cmd(sc, &iocinit_req,
2181 sizeof(iocinit_req));
2182
2183 if (retval) {
2184 printf(IOCNAME "Issue IOCInit: Admin Post failed\n",
2185 sc->name);
2186 goto out_unlock;
2187 }
2188
2189 wait_for_completion_timeout(&sc->init_cmds.completion,
2190 (MPI3MR_INTADMCMD_TIMEOUT));
2191 if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2192 printf(IOCNAME "Issue IOCInit: command timed out\n",
2193 sc->name);
2194 mpi3mr_check_rh_fault_ioc(sc,
2195 MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
2196 sc->unrecoverable = 1;
2197 retval = -1;
2198 goto out_unlock;
2199 }
2200
2201 if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2202 != MPI3_IOCSTATUS_SUCCESS ) {
2203 printf(IOCNAME "Issue IOCInit: Failed IOCStatus(0x%04x) "
2204 " Loginfo(0x%08x) \n" , sc->name,
2205 (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2206 sc->init_cmds.ioc_loginfo);
2207 retval = -1;
2208 goto out_unlock;
2209 }
2210
2211 out_unlock:
2212 sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2213 mtx_unlock(&sc->init_cmds.completion.lock);
2214
2215 out:
2216 if (drvr_info_phys != 0)
2217 bus_dmamap_unload(drvr_info_tag, drvr_info_map);
2218 if (drvr_info != NULL)
2219 bus_dmamem_free(drvr_info_tag, drvr_info, drvr_info_map);
2220 if (drvr_info_tag != NULL)
2221 bus_dma_tag_destroy(drvr_info_tag);
2222 return retval;
2223 }
2224
2225 static void
mpi3mr_display_ioc_info(struct mpi3mr_softc * sc)2226 mpi3mr_display_ioc_info(struct mpi3mr_softc *sc)
2227 {
2228 int i = 0;
2229 char personality[16];
2230
2231 switch (sc->facts.personality) {
2232 case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
2233 strcpy(personality, "Enhanced HBA");
2234 break;
2235 case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
2236 strcpy(personality, "RAID");
2237 break;
2238 default:
2239 strcpy(personality, "Unknown");
2240 break;
2241 }
2242
2243 mpi3mr_dprint(sc, MPI3MR_INFO, "Current Personality: %s\n", personality);
2244
2245 mpi3mr_dprint(sc, MPI3MR_INFO, "%s\n", sc->fw_version);
2246
2247 mpi3mr_dprint(sc, MPI3MR_INFO, "Protocol=(");
2248
2249 if (sc->facts.protocol_flags &
2250 MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2251 printf("Initiator");
2252 i++;
2253 }
2254
2255 if (sc->facts.protocol_flags &
2256 MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2257 printf("%sTarget", i ? "," : "");
2258 i++;
2259 }
2260
2261 if (sc->facts.protocol_flags &
2262 MPI3_IOCFACTS_PROTOCOL_NVME) {
2263 printf("%sNVMe attachment", i ? "," : "");
2264 i++;
2265 }
2266 i = 0;
2267 printf("), ");
2268 printf("Capabilities=(");
2269
2270 if (sc->facts.ioc_capabilities &
2271 MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE) {
2272 printf("RAID");
2273 i++;
2274 }
2275
2276 printf(")\n");
2277 }
2278
2279 /**
2280 * mpi3mr_unmask_events - Unmask events in event mask bitmap
2281 * @sc: Adapter instance reference
2282 * @event: MPI event ID
2283 *
2284 * Un mask the specific event by resetting the event_mask
2285 * bitmap.
2286 *
2287 * Return: None.
2288 */
static void mpi3mr_unmask_events(struct mpi3mr_softc *sc, U16 event)
{
	U32 desired_event;

	/* Only 4 x 32 = 128 event mask bits exist; ignore anything beyond. */
	if (event >= 128)
		return;

	/*
	 * Use an unsigned constant: "1 << 31" (event % 32 == 31) left-shifts
	 * into the sign bit of a signed int, which is undefined behavior.
	 */
	desired_event = (1U << (event % 32));

	/* Clear the event's bit in the word that covers its ID range. */
	if (event < 32)
		sc->event_masks[0] &= ~desired_event;
	else if (event < 64)
		sc->event_masks[1] &= ~desired_event;
	else if (event < 96)
		sc->event_masks[2] &= ~desired_event;
	else if (event < 128)
		sc->event_masks[3] &= ~desired_event;
}
2307
mpi3mr_set_events_mask(struct mpi3mr_softc * sc)2308 static void mpi3mr_set_events_mask(struct mpi3mr_softc *sc)
2309 {
2310 int i;
2311 for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
2312 sc->event_masks[i] = -1;
2313
2314 mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_ADDED);
2315 mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_INFO_CHANGED);
2316 mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
2317
2318 mpi3mr_unmask_events(sc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
2319
2320 mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
2321 mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_DISCOVERY);
2322 mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
2323 mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
2324
2325 mpi3mr_unmask_events(sc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
2326 mpi3mr_unmask_events(sc, MPI3_EVENT_PCIE_ENUMERATION);
2327
2328 mpi3mr_unmask_events(sc, MPI3_EVENT_PREPARE_FOR_RESET);
2329 mpi3mr_unmask_events(sc, MPI3_EVENT_CABLE_MGMT);
2330 mpi3mr_unmask_events(sc, MPI3_EVENT_ENERGY_PACK_CHANGE);
2331 }
2332
2333 /**
2334 * mpi3mr_issue_event_notification - Send event notification
2335 * @sc: Adapter instance reference
2336 *
2337 * Issue event notification MPI request through admin queue and
2338 * wait for the completion of it or time out.
2339 *
2340 * Return: 0 on success, non-zero on failures.
2341 */
int mpi3mr_issue_event_notification(struct mpi3mr_softc *sc)
{
	Mpi3EventNotificationRequest_t evtnotify_req;
	int retval = 0;
	U8 i;

	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
	/* init_cmds is a single-slot command; its completion lock serializes users. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "Issue EvtNotify: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}
	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;	/* completion is awaited synchronously below */
	evtnotify_req.HostTag = (MPI3MR_HOSTTAG_INITCMDS);
	evtnotify_req.Function = MPI3_FUNCTION_EVENT_NOTIFICATION;
	/* Copy the mask words prepared by mpi3mr_set_events_mask(). */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		evtnotify_req.EventMasks[i] =
		    (sc->event_masks[i]);
	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &evtnotify_req,
	    sizeof(evtnotify_req));
	if (retval) {
		printf(IOCNAME "Issue EvtNotify: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	/*
	 * Polls for completion rather than sleeping — NOTE(review):
	 * presumably because this can run before interrupt-driven
	 * completions are available; confirm against callers.
	 */
	poll_for_command_completion(sc,
	    &sc->init_cmds,
	    (MPI3MR_INTADMCMD_TIMEOUT));
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "Issue EvtNotify: command timed out\n",
		    sc->name);
		/* A timeout may indicate a wedged IOC; run fault handling. */
		mpi3mr_check_rh_fault_ioc(sc,
		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "Issue EvtNotify: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	/* Release the single command slot before dropping the lock. */
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);

out:
	return retval;
}
2403
2404 int
mpi3mr_register_events(struct mpi3mr_softc * sc)2405 mpi3mr_register_events(struct mpi3mr_softc *sc)
2406 {
2407 int error;
2408
2409 mpi3mr_set_events_mask(sc);
2410
2411 error = mpi3mr_issue_event_notification(sc);
2412
2413 if (error) {
2414 printf(IOCNAME "Failed to issue event notification %d\n",
2415 sc->name, error);
2416 }
2417
2418 return error;
2419 }
2420
2421 /**
2422 * mpi3mr_process_event_ack - Process event acknowledgment
2423 * @sc: Adapter instance reference
2424 * @event: MPI3 event ID
2425 * @event_ctx: Event context
2426 *
2427 * Send event acknowledgement through admin queue and wait for
2428 * it to complete.
2429 *
2430 * Return: 0 on success, non-zero on failures.
2431 */
int mpi3mr_process_event_ack(struct mpi3mr_softc *sc, U8 event,
	U32 event_ctx)
{
	Mpi3EventAckRequest_t evtack_req;
	int retval = 0;

	memset(&evtack_req, 0, sizeof(evtack_req));
	/* init_cmds is a single-slot command; its completion lock serializes users. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "Issue EvtAck: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}
	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;	/* completion is awaited synchronously below */
	evtack_req.HostTag = htole16(MPI3MR_HOSTTAG_INITCMDS);
	evtack_req.Function = MPI3_FUNCTION_EVENT_ACK;
	/* Echo back the event ID and context being acknowledged. */
	evtack_req.Event = event;
	evtack_req.EventContext = htole32(event_ctx);

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &evtack_req,
	    sizeof(evtack_req));
	if (retval) {
		printf(IOCNAME "Issue EvtAck: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	/* Sleep until the admin reply arrives or the timeout expires. */
	wait_for_completion_timeout(&sc->init_cmds.completion,
	    (MPI3MR_INTADMCMD_TIMEOUT));
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "Issue EvtAck: command timed out\n",
		    sc->name);
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "Issue EvtAck: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	/* Release the single command slot before dropping the lock. */
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);

out:
	return retval;
}
2490
2491
mpi3mr_alloc_chain_bufs(struct mpi3mr_softc * sc)2492 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_softc *sc)
2493 {
2494 int retval = 0;
2495 U32 sz, i;
2496 U16 num_chains;
2497
2498 num_chains = sc->max_host_ios;
2499
2500 sc->chain_buf_count = num_chains;
2501 sz = sizeof(struct mpi3mr_chain) * num_chains;
2502
2503 sc->chain_sgl_list = malloc(sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2504
2505 if (!sc->chain_sgl_list) {
2506 printf(IOCNAME "Cannot allocate memory for chain SGL list\n",
2507 sc->name);
2508 retval = -1;
2509 goto out_failed;
2510 }
2511
2512 sz = MPI3MR_CHAINSGE_SIZE;
2513
2514 if (bus_dma_tag_create(sc->mpi3mr_parent_dmat, /* parent */
2515 4096, 0, /* algnmnt, boundary */
2516 sc->dma_loaddr, /* lowaddr */
2517 BUS_SPACE_MAXADDR, /* highaddr */
2518 NULL, NULL, /* filter, filterarg */
2519 sz, /* maxsize */
2520 1, /* nsegments */
2521 sz, /* maxsegsize */
2522 0, /* flags */
2523 NULL, NULL, /* lockfunc, lockarg */
2524 &sc->chain_sgl_list_tag)) {
2525 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Chain buffer DMA tag\n");
2526 return (ENOMEM);
2527 }
2528
2529 for (i = 0; i < num_chains; i++) {
2530 if (bus_dmamem_alloc(sc->chain_sgl_list_tag, (void **)&sc->chain_sgl_list[i].buf,
2531 BUS_DMA_NOWAIT, &sc->chain_sgl_list[i].buf_dmamap)) {
2532 mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d DMA mem alloc failed\n",
2533 __func__, __LINE__);
2534 return (ENOMEM);
2535 }
2536
2537 bzero(sc->chain_sgl_list[i].buf, sz);
2538 bus_dmamap_load(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap, sc->chain_sgl_list[i].buf, sz,
2539 mpi3mr_memaddr_cb, &sc->chain_sgl_list[i].buf_phys, BUS_DMA_NOWAIT);
2540 mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d phys addr= %#016jx size= %d\n",
2541 __func__, __LINE__, (uintmax_t)sc->chain_sgl_list[i].buf_phys, sz);
2542 }
2543
2544 sc->chain_bitmap_sz = MPI3MR_DIV_ROUND_UP(num_chains, 8);
2545
2546 sc->chain_bitmap = malloc(sc->chain_bitmap_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2547 if (!sc->chain_bitmap) {
2548 mpi3mr_dprint(sc, MPI3MR_INFO, "Cannot alloc memory for chain bitmap\n");
2549 retval = -1;
2550 goto out_failed;
2551 }
2552 return retval;
2553
2554 out_failed:
2555 for (i = 0; i < num_chains; i++) {
2556 if (sc->chain_sgl_list[i].buf_phys != 0)
2557 bus_dmamap_unload(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap);
2558 if (sc->chain_sgl_list[i].buf != NULL)
2559 bus_dmamem_free(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf, sc->chain_sgl_list[i].buf_dmamap);
2560 }
2561 if (sc->chain_sgl_list_tag != NULL)
2562 bus_dma_tag_destroy(sc->chain_sgl_list_tag);
2563 return retval;
2564 }
2565
mpi3mr_pel_alloc(struct mpi3mr_softc * sc)2566 static int mpi3mr_pel_alloc(struct mpi3mr_softc *sc)
2567 {
2568 int retval = 0;
2569
2570 if (!sc->pel_cmds.reply) {
2571 sc->pel_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2572 if (!sc->pel_cmds.reply) {
2573 printf(IOCNAME "Cannot allocate memory for pel_cmds.reply\n",
2574 sc->name);
2575 goto out_failed;
2576 }
2577 }
2578
2579 if (!sc->pel_abort_cmd.reply) {
2580 sc->pel_abort_cmd.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2581 if (!sc->pel_abort_cmd.reply) {
2582 printf(IOCNAME "Cannot allocate memory for pel_abort_cmd.reply\n",
2583 sc->name);
2584 goto out_failed;
2585 }
2586 }
2587
2588 if (!sc->pel_seq_number) {
2589 sc->pel_seq_number_sz = sizeof(Mpi3PELSeq_t);
2590 if (bus_dma_tag_create(sc->mpi3mr_parent_dmat, /* parent */
2591 4, 0, /* alignment, boundary */
2592 sc->dma_loaddr, /* lowaddr */
2593 BUS_SPACE_MAXADDR, /* highaddr */
2594 NULL, NULL, /* filter, filterarg */
2595 sc->pel_seq_number_sz, /* maxsize */
2596 1, /* nsegments */
2597 sc->pel_seq_number_sz, /* maxsegsize */
2598 0, /* flags */
2599 NULL, NULL, /* lockfunc, lockarg */
2600 &sc->pel_seq_num_dmatag)) {
2601 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot create PEL seq number dma memory tag\n");
2602 retval = -ENOMEM;
2603 goto out_failed;
2604 }
2605
2606 if (bus_dmamem_alloc(sc->pel_seq_num_dmatag, (void **)&sc->pel_seq_number,
2607 BUS_DMA_NOWAIT, &sc->pel_seq_num_dmamap)) {
2608 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate PEL seq number kernel buffer dma memory\n");
2609 retval = -ENOMEM;
2610 goto out_failed;
2611 }
2612
2613 bzero(sc->pel_seq_number, sc->pel_seq_number_sz);
2614
2615 bus_dmamap_load(sc->pel_seq_num_dmatag, sc->pel_seq_num_dmamap, sc->pel_seq_number,
2616 sc->pel_seq_number_sz, mpi3mr_memaddr_cb, &sc->pel_seq_number_dma, BUS_DMA_NOWAIT);
2617
2618 if (!sc->pel_seq_number) {
2619 printf(IOCNAME "%s:%d Cannot load PEL seq number dma memory for size: %d\n", sc->name,
2620 __func__, __LINE__, sc->pel_seq_number_sz);
2621 retval = -ENOMEM;
2622 goto out_failed;
2623 }
2624 }
2625
2626 out_failed:
2627 return retval;
2628 }
2629
2630 /**
2631 * mpi3mr_validate_fw_update - validate IOCFacts post adapter reset
2632 * @sc: Adapter instance reference
2633 *
2634 * Return zero if the new IOCFacts is compatible with previous values
2635 * else return appropriate error
2636 */
2637 static int
mpi3mr_validate_fw_update(struct mpi3mr_softc * sc)2638 mpi3mr_validate_fw_update(struct mpi3mr_softc *sc)
2639 {
2640 U16 dev_handle_bitmap_sz;
2641 U8 *removepend_bitmap;
2642
2643 if (sc->facts.reply_sz > sc->reply_sz) {
2644 mpi3mr_dprint(sc, MPI3MR_ERROR,
2645 "Cannot increase reply size from %d to %d\n",
2646 sc->reply_sz, sc->reply_sz);
2647 return -EPERM;
2648 }
2649
2650 if (sc->num_io_throttle_group != sc->facts.max_io_throttle_group) {
2651 mpi3mr_dprint(sc, MPI3MR_ERROR,
2652 "max io throttle group doesn't match old(%d), new(%d)\n",
2653 sc->num_io_throttle_group,
2654 sc->facts.max_io_throttle_group);
2655 return -EPERM;
2656 }
2657
2658 if (sc->facts.max_op_reply_q < sc->num_queues) {
2659 mpi3mr_dprint(sc, MPI3MR_ERROR,
2660 "Cannot reduce number of operational reply queues from %d to %d\n",
2661 sc->num_queues,
2662 sc->facts.max_op_reply_q);
2663 return -EPERM;
2664 }
2665
2666 if (sc->facts.max_op_req_q < sc->num_queues) {
2667 mpi3mr_dprint(sc, MPI3MR_ERROR,
2668 "Cannot reduce number of operational request queues from %d to %d\n",
2669 sc->num_queues, sc->facts.max_op_req_q);
2670 return -EPERM;
2671 }
2672
2673 dev_handle_bitmap_sz = MPI3MR_DIV_ROUND_UP(sc->facts.max_devhandle, 8);
2674
2675 if (dev_handle_bitmap_sz > sc->dev_handle_bitmap_sz) {
2676 removepend_bitmap = realloc(sc->removepend_bitmap,
2677 dev_handle_bitmap_sz, M_MPI3MR, M_NOWAIT);
2678
2679 if (!removepend_bitmap) {
2680 mpi3mr_dprint(sc, MPI3MR_ERROR,
2681 "failed to increase removepend_bitmap sz from: %d to %d\n",
2682 sc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
2683 return -ENOMEM;
2684 }
2685
2686 memset(removepend_bitmap + sc->dev_handle_bitmap_sz, 0,
2687 dev_handle_bitmap_sz - sc->dev_handle_bitmap_sz);
2688 sc->removepend_bitmap = removepend_bitmap;
2689 mpi3mr_dprint(sc, MPI3MR_INFO,
2690 "increased dev_handle_bitmap_sz from %d to %d\n",
2691 sc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
2692 sc->dev_handle_bitmap_sz = dev_handle_bitmap_sz;
2693 }
2694
2695 return 0;
2696 }
2697
2698 /*
2699 * mpi3mr_initialize_ioc - Controller initialization
2700 * @dev: pointer to device struct
2701 *
2702 * This function allocates the controller wide resources and brings
2703 * the controller to operational state
2704 *
2705 * Return: 0 on success and proper error codes on failure
2706 */
mpi3mr_initialize_ioc(struct mpi3mr_softc * sc,U8 init_type)2707 int mpi3mr_initialize_ioc(struct mpi3mr_softc *sc, U8 init_type)
2708 {
2709 int retval = 0;
2710 enum mpi3mr_iocstate ioc_state;
2711 U64 ioc_info;
2712 U32 ioc_status, ioc_control, i, timeout;
2713 Mpi3IOCFactsData_t facts_data;
2714 char str[32];
2715 U32 size;
2716
2717 sc->cpu_count = mp_ncpus;
2718
2719 ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
2720 ioc_control = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
2721 ioc_info = mpi3mr_regread64(sc, MPI3_SYSIF_IOC_INFO_LOW_OFFSET);
2722
2723 mpi3mr_dprint(sc, MPI3MR_INFO, "SOD ioc_status: 0x%x ioc_control: 0x%x "
2724 "ioc_info: 0x%lx\n", ioc_status, ioc_control, ioc_info);
2725
2726 /*The timeout value is in 2sec unit, changing it to seconds*/
2727 sc->ready_timeout =
2728 ((ioc_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
2729 MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
2730
2731 ioc_state = mpi3mr_get_iocstate(sc);
2732
2733 mpi3mr_dprint(sc, MPI3MR_INFO, "IOC state: %s IOC ready timeout: %d\n",
2734 mpi3mr_iocstate_name(ioc_state), sc->ready_timeout);
2735
2736 if (ioc_state == MRIOC_STATE_BECOMING_READY ||
2737 ioc_state == MRIOC_STATE_RESET_REQUESTED) {
2738 timeout = sc->ready_timeout * 10;
2739 do {
2740 DELAY(1000 * 100);
2741 } while (--timeout);
2742
2743 ioc_state = mpi3mr_get_iocstate(sc);
2744 mpi3mr_dprint(sc, MPI3MR_INFO,
2745 "IOC in %s state after waiting for reset time\n",
2746 mpi3mr_iocstate_name(ioc_state));
2747 }
2748
2749 if (ioc_state == MRIOC_STATE_READY) {
2750 retval = mpi3mr_mur_ioc(sc, MPI3MR_RESET_FROM_BRINGUP);
2751 if (retval) {
2752 mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to MU reset IOC, error 0x%x\n",
2753 retval);
2754 }
2755 ioc_state = mpi3mr_get_iocstate(sc);
2756 }
2757
2758 if (ioc_state != MRIOC_STATE_RESET) {
2759 mpi3mr_print_fault_info(sc);
2760 mpi3mr_dprint(sc, MPI3MR_ERROR, "issuing soft reset to bring to reset state\n");
2761 retval = mpi3mr_issue_reset(sc,
2762 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
2763 MPI3MR_RESET_FROM_BRINGUP);
2764 if (retval) {
2765 mpi3mr_dprint(sc, MPI3MR_ERROR,
2766 "%s :Failed to soft reset IOC, error 0x%d\n",
2767 __func__, retval);
2768 goto out_failed;
2769 }
2770 }
2771
2772 ioc_state = mpi3mr_get_iocstate(sc);
2773
2774 if (ioc_state != MRIOC_STATE_RESET) {
2775 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot bring IOC to reset state\n");
2776 goto out_failed;
2777 }
2778
2779 retval = mpi3mr_setup_admin_qpair(sc);
2780 if (retval) {
2781 mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to setup Admin queues, error 0x%x\n",
2782 retval);
2783 goto out_failed;
2784 }
2785
2786 retval = mpi3mr_bring_ioc_ready(sc);
2787 if (retval) {
2788 mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to bring IOC ready, error 0x%x\n",
2789 retval);
2790 goto out_failed;
2791 }
2792
2793 if (init_type == MPI3MR_INIT_TYPE_INIT) {
2794 retval = mpi3mr_alloc_interrupts(sc, 1);
2795 if (retval) {
2796 mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate interrupts, error 0x%x\n",
2797 retval);
2798 goto out_failed;
2799 }
2800
2801 retval = mpi3mr_setup_irqs(sc);
2802 if (retval) {
2803 mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to setup ISR, error 0x%x\n",
2804 retval);
2805 goto out_failed;
2806 }
2807 }
2808
2809 mpi3mr_enable_interrupts(sc);
2810
2811 if (init_type == MPI3MR_INIT_TYPE_INIT) {
2812 mtx_init(&sc->mpi3mr_mtx, "SIM lock", NULL, MTX_DEF);
2813 mtx_init(&sc->io_lock, "IO lock", NULL, MTX_DEF);
2814 mtx_init(&sc->admin_req_lock, "Admin Request Queue lock", NULL, MTX_SPIN);
2815 mtx_init(&sc->reply_free_q_lock, "Reply free Queue lock", NULL, MTX_SPIN);
2816 mtx_init(&sc->sense_buf_q_lock, "Sense buffer Queue lock", NULL, MTX_SPIN);
2817 mtx_init(&sc->chain_buf_lock, "Chain buffer lock", NULL, MTX_SPIN);
2818 mtx_init(&sc->cmd_pool_lock, "Command pool lock", NULL, MTX_DEF);
2819 mtx_init(&sc->fwevt_lock, "Firmware Event lock", NULL, MTX_DEF);
2820 mtx_init(&sc->target_lock, "Target lock", NULL, MTX_SPIN);
2821 mtx_init(&sc->reset_mutex, "Reset lock", NULL, MTX_DEF);
2822
2823 mtx_init(&sc->init_cmds.completion.lock, "Init commands lock", NULL, MTX_DEF);
2824 sc->init_cmds.reply = NULL;
2825 sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2826 sc->init_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2827 sc->init_cmds.host_tag = MPI3MR_HOSTTAG_INITCMDS;
2828
2829 mtx_init(&sc->ioctl_cmds.completion.lock, "IOCTL commands lock", NULL, MTX_DEF);
2830 sc->ioctl_cmds.reply = NULL;
2831 sc->ioctl_cmds.state = MPI3MR_CMD_NOTUSED;
2832 sc->ioctl_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2833 sc->ioctl_cmds.host_tag = MPI3MR_HOSTTAG_IOCTLCMDS;
2834
2835 mtx_init(&sc->pel_abort_cmd.completion.lock, "PEL Abort command lock", NULL, MTX_DEF);
2836 sc->pel_abort_cmd.reply = NULL;
2837 sc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED;
2838 sc->pel_abort_cmd.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2839 sc->pel_abort_cmd.host_tag = MPI3MR_HOSTTAG_PELABORT;
2840
2841 mtx_init(&sc->host_tm_cmds.completion.lock, "TM commands lock", NULL, MTX_DEF);
2842 sc->host_tm_cmds.reply = NULL;
2843 sc->host_tm_cmds.state = MPI3MR_CMD_NOTUSED;
2844 sc->host_tm_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2845 sc->host_tm_cmds.host_tag = MPI3MR_HOSTTAG_TMS;
2846
2847 TAILQ_INIT(&sc->cmd_list_head);
2848 TAILQ_INIT(&sc->event_list);
2849 TAILQ_INIT(&sc->delayed_rmhs_list);
2850 TAILQ_INIT(&sc->delayed_evtack_cmds_list);
2851
2852 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
2853 snprintf(str, 32, "Dev REMHS commands lock[%d]", i);
2854 mtx_init(&sc->dev_rmhs_cmds[i].completion.lock, str, NULL, MTX_DEF);
2855 sc->dev_rmhs_cmds[i].reply = NULL;
2856 sc->dev_rmhs_cmds[i].state = MPI3MR_CMD_NOTUSED;
2857 sc->dev_rmhs_cmds[i].dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2858 sc->dev_rmhs_cmds[i].host_tag = MPI3MR_HOSTTAG_DEVRMCMD_MIN
2859 + i;
2860 }
2861 }
2862
2863 retval = mpi3mr_issue_iocfacts(sc, &facts_data);
2864 if (retval) {
2865 mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to Issue IOC Facts, retval: 0x%x\n",
2866 retval);
2867 goto out_failed;
2868 }
2869
2870 retval = mpi3mr_process_factsdata(sc, &facts_data);
2871 if (retval) {
2872 mpi3mr_dprint(sc, MPI3MR_ERROR, "IOC Facts data processing failedi, retval: 0x%x\n",
2873 retval);
2874 goto out_failed;
2875 }
2876
2877 sc->num_io_throttle_group = sc->facts.max_io_throttle_group;
2878 mpi3mr_atomic_set(&sc->pend_large_data_sz, 0);
2879
2880 if (init_type == MPI3MR_INIT_TYPE_RESET) {
2881 retval = mpi3mr_validate_fw_update(sc);
2882 if (retval)
2883 goto out_failed;
2884 } else {
2885 sc->reply_sz = sc->facts.reply_sz;
2886 }
2887
2888 mpi3mr_display_ioc_info(sc);
2889
2890 retval = mpi3mr_reply_alloc(sc);
2891 if (retval) {
2892 mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocated reply and sense buffers, retval: 0x%x\n",
2893 retval);
2894 goto out_failed;
2895 }
2896
2897 if (init_type == MPI3MR_INIT_TYPE_INIT) {
2898 retval = mpi3mr_alloc_chain_bufs(sc);
2899 if (retval) {
2900 mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocated chain buffers, retval: 0x%x\n",
2901 retval);
2902 goto out_failed;
2903 }
2904 }
2905
2906 retval = mpi3mr_issue_iocinit(sc);
2907 if (retval) {
2908 mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to Issue IOC Init, retval: 0x%x\n",
2909 retval);
2910 goto out_failed;
2911 }
2912
2913 mpi3mr_print_fw_pkg_ver(sc);
2914
2915 sc->reply_free_q_host_index = sc->num_reply_bufs;
2916 mpi3mr_regwrite(sc, MPI3_SYSIF_REPLY_FREE_HOST_INDEX_OFFSET,
2917 sc->reply_free_q_host_index);
2918
2919 sc->sense_buf_q_host_index = sc->num_sense_bufs;
2920
2921 mpi3mr_regwrite(sc, MPI3_SYSIF_SENSE_BUF_FREE_HOST_INDEX_OFFSET,
2922 sc->sense_buf_q_host_index);
2923
2924 if (init_type == MPI3MR_INIT_TYPE_INIT) {
2925 retval = mpi3mr_alloc_interrupts(sc, 0);
2926 if (retval) {
2927 mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate interrupts, retval: 0x%x\n",
2928 retval);
2929 goto out_failed;
2930 }
2931
2932 retval = mpi3mr_setup_irqs(sc);
2933 if (retval) {
2934 printf(IOCNAME "Failed to setup ISR, error: 0x%x\n",
2935 sc->name, retval);
2936 goto out_failed;
2937 }
2938
2939 mpi3mr_enable_interrupts(sc);
2940
2941 } else
2942 mpi3mr_enable_interrupts(sc);
2943
2944 retval = mpi3mr_create_op_queues(sc);
2945
2946 if (retval) {
2947 mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create operational queues, error: %d\n",
2948 retval);
2949 goto out_failed;
2950 }
2951
2952 if (!sc->throttle_groups && sc->num_io_throttle_group) {
2953 mpi3mr_dprint(sc, MPI3MR_ERROR, "allocating memory for throttle groups\n");
2954 size = sizeof(struct mpi3mr_throttle_group_info);
2955 sc->throttle_groups = (struct mpi3mr_throttle_group_info *)
2956 malloc(sc->num_io_throttle_group *
2957 size, M_MPI3MR, M_NOWAIT | M_ZERO);
2958 if (!sc->throttle_groups)
2959 goto out_failed;
2960 }
2961
2962 if (init_type == MPI3MR_INIT_TYPE_RESET) {
2963 mpi3mr_dprint(sc, MPI3MR_INFO, "Re-register events\n");
2964 retval = mpi3mr_register_events(sc);
2965 if (retval) {
2966 mpi3mr_dprint(sc, MPI3MR_INFO, "Failed to re-register events, retval: 0x%x\n",
2967 retval);
2968 goto out_failed;
2969 }
2970
2971 mpi3mr_dprint(sc, MPI3MR_INFO, "Issuing Port Enable\n");
2972 retval = mpi3mr_issue_port_enable(sc, 0);
2973 if (retval) {
2974 mpi3mr_dprint(sc, MPI3MR_INFO, "Failed to issue port enable, retval: 0x%x\n",
2975 retval);
2976 goto out_failed;
2977 }
2978 }
2979 retval = mpi3mr_pel_alloc(sc);
2980 if (retval) {
2981 mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate memory for PEL, retval: 0x%x\n",
2982 retval);
2983 goto out_failed;
2984 }
2985
2986 return retval;
2987
2988 out_failed:
2989 retval = -1;
2990 return retval;
2991 }
2992
mpi3mr_port_enable_complete(struct mpi3mr_softc * sc,struct mpi3mr_drvr_cmd * drvrcmd)2993 static void mpi3mr_port_enable_complete(struct mpi3mr_softc *sc,
2994 struct mpi3mr_drvr_cmd *drvrcmd)
2995 {
2996 drvrcmd->state = MPI3MR_CMD_NOTUSED;
2997 drvrcmd->callback = NULL;
2998 printf(IOCNAME "Completing Port Enable Request\n", sc->name);
2999 sc->mpi3mr_flags |= MPI3MR_FLAGS_PORT_ENABLE_DONE;
3000 mpi3mr_startup_decrement(sc->cam_sc);
3001 }
3002
/*
 * mpi3mr_issue_port_enable - Issue a Port Enable request to the firmware
 * @sc: Adapter instance reference
 * @async: If non-zero, completion is delivered via callback; otherwise
 *         this call blocks (with timeout) until the request completes.
 *
 * Sends MPI3_FUNCTION_PORT_ENABLE through the admin queue using the
 * shared init_cmds tracker.  In synchronous mode a timeout triggers the
 * reset-on-hang fault handler.
 *
 * Return: 0 on success, -1 on failure.
 */
int mpi3mr_issue_port_enable(struct mpi3mr_softc *sc, U8 async)
{
	Mpi3PortEnableRequest_t pe_req;
	int retval = 0;

	memset(&pe_req, 0, sizeof(pe_req));
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "Issue PortEnable: Init command is in use\n", sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}

	sc->init_cmds.state = MPI3MR_CMD_PENDING;

	if (async) {
		sc->init_cmds.is_waiting = 0;
		sc->init_cmds.callback = mpi3mr_port_enable_complete;
	} else {
		sc->init_cmds.is_waiting = 1;
		sc->init_cmds.callback = NULL;
		init_completion(&sc->init_cmds.completion);
	}
	pe_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
	pe_req.Function = MPI3_FUNCTION_PORT_ENABLE;

	printf(IOCNAME "Sending Port Enable Request\n", sc->name);
	retval = mpi3mr_submit_admin_cmd(sc, &pe_req, sizeof(pe_req));
	if (retval) {
		printf(IOCNAME "Issue PortEnable: Admin Post failed\n",
		    sc->name);
		/*
		 * The request never reached the firmware, so no completion
		 * will ever arrive: release the tracker here, otherwise it
		 * stays PENDING forever and every later init_cmds user
		 * fails with "Init command is in use".
		 */
		sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
		sc->init_cmds.callback = NULL;
		goto out_unlock;
	}

	if (!async) {
		wait_for_completion_timeout(&sc->init_cmds.completion,
		    MPI3MR_PORTENABLE_TIMEOUT);
		if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
			printf(IOCNAME "Issue PortEnable: command timed out\n",
			    sc->name);
			retval = -1;
			/* Escalate a hung port enable into a reset. */
			mpi3mr_check_rh_fault_ioc(sc, MPI3MR_RESET_FROM_PE_TIMEOUT);
			goto out_unlock;
		}
		mpi3mr_port_enable_complete(sc, &sc->init_cmds);
	}
out_unlock:
	mtx_unlock(&sc->init_cmds.completion.lock);

out:
	return retval;
}
3056
/*
 * mpi3mr_watchdog_thread - Controller health monitoring kernel thread
 * @arg: Adapter softc (struct mpi3mr_softc *) passed at thread creation
 *
 * Loops until shutdown is requested or the controller is marked
 * unrecoverable.  Each pass: handles an expired prepare-for-reset
 * window, reacts to a firmware-initiated reset (reset-history bit),
 * inspects the fault register when the IOC is in the FAULT state and
 * either marks the controller unrecoverable or triggers a soft reset,
 * honors any deferred soft-reset request, then sleeps for one second.
 */
void
mpi3mr_watchdog_thread(void *arg)
{
	struct mpi3mr_softc *sc;
	enum mpi3mr_iocstate ioc_state;
	U32 fault, host_diagnostic, ioc_status;

	sc = (struct mpi3mr_softc *)arg;

	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s\n", __func__);

	sc->watchdog_thread_active = 1;
	/*
	 * The loop is entered with reset_mutex held; it is dropped while
	 * the hardware is polled and re-taken before sleeping or exiting.
	 */
	mtx_lock(&sc->reset_mutex);
	for (;;) {
		if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN ||
		    (sc->unrecoverable == 1)) {
			mpi3mr_dprint(sc, MPI3MR_INFO,
			    "Exit due to %s from %s\n",
			    sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN ? "Shutdown" :
			    "Hardware critical error", __func__);
			break;
		}
		mtx_unlock(&sc->reset_mutex);

		/*
		 * Firmware asked the host to prepare for reset but never
		 * followed through within the timeout: force a soft reset.
		 */
		if ((sc->prepare_for_reset) &&
		    ((sc->prepare_for_reset_timeout_counter++) >=
		      MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
			mpi3mr_soft_reset_handler(sc,
			    MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
			goto sleep;
		}

		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);

		/*
		 * Reset-history set means the firmware reset itself behind
		 * our back; recover through the soft reset handler.
		 */
		if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
			mpi3mr_soft_reset_handler(sc, MPI3MR_RESET_FROM_FIRMWARE, 0);
			goto sleep;
		}

		ioc_state = mpi3mr_get_iocstate(sc);
		if (ioc_state == MRIOC_STATE_FAULT) {
			fault = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) &
			    MPI3_SYSIF_FAULT_CODE_MASK;

			host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
			if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
				/*
				 * Give the firmware time to finish its
				 * diagnostic save before acting on the fault.
				 */
				if (!sc->diagsave_timeout) {
					mpi3mr_print_fault_info(sc);
					mpi3mr_dprint(sc, MPI3MR_INFO,
						"diag save in progress\n");
				}
				if ((sc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
					goto sleep;
			}
			mpi3mr_print_fault_info(sc);
			sc->diagsave_timeout = 0;

			/*
			 * These fault codes cannot be recovered by a soft
			 * reset; stop servicing the controller entirely.
			 */
			if ((fault == MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED) ||
			    (fault == MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED)) {
				mpi3mr_dprint(sc, MPI3MR_INFO,
				    "Controller requires system power cycle or complete reset is needed,"
				    "fault code: 0x%x. marking controller as unrecoverable\n", fault);
				sc->unrecoverable = 1;
				break;
			}
			/*
			 * A reset is already in progress (or being driven
			 * elsewhere); nothing more for this thread to do.
			 */
			if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET)
			    || (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS)
			    || (sc->reset_in_progress))
				break;
			if (fault == MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET)
				mpi3mr_soft_reset_handler(sc,
				    MPI3MR_RESET_FROM_CIACTIV_FAULT, 0);
			else
				mpi3mr_soft_reset_handler(sc,
				    MPI3MR_RESET_FROM_FAULT_WATCH, 0);

		}

		/*
		 * A deferred soft reset was requested (sc->reset.type set by
		 * another context); perform it from here.
		 */
		if (sc->reset.type == MPI3MR_TRIGGER_SOFT_RESET) {
			mpi3mr_print_fault_info(sc);
			mpi3mr_soft_reset_handler(sc, sc->reset.reason, 1);
		}
sleep:
		mtx_lock(&sc->reset_mutex);
		/*
		 * Sleep for 1 second if we're not exiting, then loop to top
		 * to poll exit status and hardware health.
		 */
		if ((sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN) == 0 &&
		    !sc->unrecoverable) {
			msleep(&sc->watchdog_chan, &sc->reset_mutex, PRIBIO,
			    "mpi3mr_watchdog", 1 * hz);
		}
	}
	mtx_unlock(&sc->reset_mutex);
	sc->watchdog_thread_active = 0;
	mpi3mr_kproc_exit(0);
}
3155
/*
 * mpi3mr_display_event_data - Log a firmware event notification
 * @sc: Adapter instance reference
 * @event_rep: Event notification reply received from the firmware
 *
 * Events carrying a payload worth decoding (device add/change, SAS
 * discovery, PCIe enumeration) are printed directly and return early;
 * all other known events are logged by name only.  Unknown events are
 * silently ignored.
 */
static void mpi3mr_display_event_data(struct mpi3mr_softc *sc,
    Mpi3EventNotificationReply_t *event_rep)
{
	U16 event_code = event_rep->Event;
	char *name = NULL;

	switch (event_code) {
	case MPI3_EVENT_LOG_DATA:
		name = "Log Data";
		break;
	case MPI3_EVENT_CHANGE:
		name = "Event Change";
		break;
	case MPI3_EVENT_GPIO_INTERRUPT:
		name = "GPIO Interrupt";
		break;
	case MPI3_EVENT_CABLE_MGMT:
		name = "Cable Management";
		break;
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
		name = "Energy Pack Change";
		break;
	case MPI3_EVENT_DEVICE_ADDED:
	{
		Mpi3DevicePage0_t *dev_pg0 =
		    (Mpi3DevicePage0_t *)event_rep->EventData;
		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Added: Dev=0x%04x Form=0x%x Perst id: 0x%x\n",
		    dev_pg0->DevHandle, dev_pg0->DeviceForm, dev_pg0->PersistentID);
		return;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		Mpi3DevicePage0_t *dev_pg0 =
		    (Mpi3DevicePage0_t *)event_rep->EventData;
		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Info Changed: Dev=0x%04x Form=0x%x\n",
		    dev_pg0->DevHandle, dev_pg0->DeviceForm);
		return;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		Mpi3EventDataDeviceStatusChange_t *status_change =
		    (Mpi3EventDataDeviceStatusChange_t *)event_rep->EventData;
		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Status Change: Dev=0x%04x RC=0x%x\n",
		    status_change->DevHandle, status_change->ReasonCode);
		return;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	{
		Mpi3EventDataSasDiscovery_t *disc =
		    (Mpi3EventDataSasDiscovery_t *)event_rep->EventData;
		mpi3mr_dprint(sc, MPI3MR_EVENT, "SAS Discovery: (%s)",
		    (disc->ReasonCode == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop");
		/* Append the status only when event logging is enabled. */
		if (disc->DiscoveryStatus &&
		    (sc->mpi3mr_debug & MPI3MR_EVENT)) {
			printf("discovery_status(0x%08x)",
			    disc->DiscoveryStatus);
		}
		if (sc->mpi3mr_debug & MPI3MR_EVENT)
			printf("\n");
		return;
	}
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
		name = "SAS Broadcast Primitive";
		break;
	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
		name = "SAS Notify Primitive";
		break;
	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		name = "SAS Init Device Status Change";
		break;
	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
		name = "SAS Init Table Overflow";
		break;
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		name = "SAS Topology Change List";
		break;
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
		name = "Enclosure Device Status Change";
		break;
	case MPI3_EVENT_HARD_RESET_RECEIVED:
		name = "Hard Reset Received";
		break;
	case MPI3_EVENT_SAS_PHY_COUNTER:
		name = "SAS PHY Counter";
		break;
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		name = "SAS Device Discovery Error";
		break;
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		name = "PCIE Topology Change List";
		break;
	case MPI3_EVENT_PCIE_ENUMERATION:
	{
		Mpi3EventDataPcieEnumeration_t *enum_data =
		    (Mpi3EventDataPcieEnumeration_t *)event_rep->EventData;
		mpi3mr_dprint(sc, MPI3MR_EVENT, "PCIE Enumeration: (%s)",
		    (enum_data->ReasonCode ==
		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" :
		    "stop");
		if (enum_data->EnumerationStatus)
			mpi3mr_dprint(sc, MPI3MR_EVENT, "enumeration_status(0x%08x)",
			    enum_data->EnumerationStatus);
		if (sc->mpi3mr_debug & MPI3MR_EVENT)
			printf("\n");
		return;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
		name = "Prepare For Reset";
		break;
	}

	/* Unknown events fall through with name == NULL and are ignored. */
	if (name != NULL)
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s\n", name);
}
3277
3278 struct mpi3mr_target *
mpi3mr_find_target_by_per_id(struct mpi3mr_cam_softc * cam_sc,uint16_t per_id)3279 mpi3mr_find_target_by_per_id(struct mpi3mr_cam_softc *cam_sc,
3280 uint16_t per_id)
3281 {
3282 struct mpi3mr_target *target = NULL;
3283
3284 mtx_lock_spin(&cam_sc->sc->target_lock);
3285 TAILQ_FOREACH(target, &cam_sc->tgt_list, tgt_next) {
3286 if (target->per_id == per_id)
3287 break;
3288 }
3289
3290 mtx_unlock_spin(&cam_sc->sc->target_lock);
3291 return target;
3292 }
3293
3294 struct mpi3mr_target *
mpi3mr_find_target_by_dev_handle(struct mpi3mr_cam_softc * cam_sc,uint16_t handle)3295 mpi3mr_find_target_by_dev_handle(struct mpi3mr_cam_softc *cam_sc,
3296 uint16_t handle)
3297 {
3298 struct mpi3mr_target *target = NULL;
3299
3300 mtx_lock_spin(&cam_sc->sc->target_lock);
3301 TAILQ_FOREACH(target, &cam_sc->tgt_list, tgt_next) {
3302 if (target->dev_handle == handle)
3303 break;
3304
3305 }
3306 mtx_unlock_spin(&cam_sc->sc->target_lock);
3307 return target;
3308 }
3309
/*
 * mpi3mr_update_device - Refresh a driver target from MPI3 Device Page 0
 * @sc: Adapter instance reference
 * @tgtdev: Driver target object to populate
 * @dev_pg0: Device Page 0 data retrieved from the firmware
 * @is_added: true when the device was newly discovered
 *
 * Copies the generic identity/queueing fields, then applies settings
 * specific to the device form (SAS/SATA, PCIe, VD).  is_hidden may be
 * set — never cleared — by the device flags, by a non-benign access
 * status, or by the form-specific checks below.
 */
void mpi3mr_update_device(struct mpi3mr_softc *sc,
    struct mpi3mr_target *tgtdev, Mpi3DevicePage0_t *dev_pg0,
    bool is_added)
{
	U16 flags = 0;

	tgtdev->per_id = (dev_pg0->PersistentID);
	tgtdev->dev_handle = (dev_pg0->DevHandle);
	tgtdev->dev_type = dev_pg0->DeviceForm;
	tgtdev->encl_handle = (dev_pg0->EnclosureHandle);
	tgtdev->parent_handle = (dev_pg0->ParentDevHandle);
	tgtdev->slot = (dev_pg0->Slot);
	/*
	 * NOTE(review): 'qdepth' here vs 'q_depth' in the PCIe/VD paths
	 * below — confirm both fields are intended and kept in sync.
	 */
	tgtdev->qdepth = (dev_pg0->QueueDepth);
	tgtdev->wwid = (dev_pg0->WWID);

	flags = (dev_pg0->Flags);
	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);
	/* Throttling eligibility is only (re)evaluated on device add. */
	if (is_added == true)
		tgtdev->io_throttle_enabled =
		    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;

	/* Any access status outside this benign set hides the device. */
	switch (dev_pg0->AccessStatus) {
	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI3_DEVICE0_ASTATUS_PREPARE:
	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
		break;
	default:
		tgtdev->is_hidden = 1;
		break;
	}

	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
	{
		Mpi3Device0SasSataFormat_t *sasinf =
		    &dev_pg0->DeviceSpecific.SasSataFormat;
		U16 dev_info = (sasinf->DeviceInfo);
		tgtdev->dev_spec.sassata_inf.dev_info = dev_info;
		tgtdev->dev_spec.sassata_inf.sas_address =
		    (sasinf->SASAddress);
		/* Only SSP or STP/SATA end devices stay visible. */
		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
			tgtdev->is_hidden = 1;
		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
		    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
			tgtdev->is_hidden = 1;
		break;
	}
	case MPI3_DEVICE_DEVFORM_PCIE:
	{
		Mpi3Device0PcieFormat_t *pcieinf =
		    &dev_pg0->DeviceSpecific.PcieFormat;
		U16 dev_info = (pcieinf->DeviceInfo);

		tgtdev->q_depth = dev_pg0->QueueDepth;
		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
		tgtdev->dev_spec.pcie_inf.capb =
		    (pcieinf->Capabilities);
		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
		/*
		 * The transfer-size and timeout fields are only taken from
		 * the page when the device reports no access errors.
		 */
		if (dev_pg0->AccessStatus == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
			tgtdev->dev_spec.pcie_inf.mdts =
			    (pcieinf->MaximumDataTransferSize);
			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->PageSize;
			tgtdev->dev_spec.pcie_inf.reset_to =
			    pcieinf->ControllerResetTO;
			tgtdev->dev_spec.pcie_inf.abort_to =
			    pcieinf->NVMeAbortTO;
		}
		/* Cap the max data transfer size at 1 MiB. */
		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);

		/* Hide PCIe functions that are neither NVMe nor SCSI. */
		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
			tgtdev->is_hidden = 1;

		break;
	}
	case MPI3_DEVICE_DEVFORM_VD:
	{
		Mpi3Device0VdFormat_t *vdinf =
		    &dev_pg0->DeviceSpecific.VdFormat;
		struct mpi3mr_throttle_group_info *tg = NULL;

		tgtdev->dev_spec.vol_inf.state = vdinf->VdState;
		if (vdinf->VdState == MPI3_DEVICE0_VD_STATE_OFFLINE)
			tgtdev->is_hidden = 1;
		tgtdev->dev_spec.vol_inf.tg_id = vdinf->IOThrottleGroup;
		/*
		 * NOTE(review): thresholds appear to be scaled from units
		 * of 2048 — confirm against the MPI3 specification.
		 */
		tgtdev->dev_spec.vol_inf.tg_high =
		    vdinf->IOThrottleGroupHigh * 2048;
		tgtdev->dev_spec.vol_inf.tg_low =
		    vdinf->IOThrottleGroupLow * 2048;
		/* Mirror the thresholds into the adapter's group table. */
		if (vdinf->IOThrottleGroup < sc->num_io_throttle_group) {
			tg = sc->throttle_groups + vdinf->IOThrottleGroup;
			tg->id = vdinf->IOThrottleGroup;
			tg->high = tgtdev->dev_spec.vol_inf.tg_high;
			tg->low = tgtdev->dev_spec.vol_inf.tg_low;
			if (is_added == true)
				tg->fw_qd = tgtdev->q_depth;
			tg->modified_qd = tgtdev->q_depth;
		}
		/* tg stays NULL for out-of-range throttle groups. */
		tgtdev->dev_spec.vol_inf.tg = tg;
		tgtdev->throttle_group = tg;
		break;
	}
	default:
		goto out;
	}

out:
	return;
}
3424
/*
 * mpi3mr_create_device - Create or refresh a target for a Device Page 0
 * @sc: Adapter instance reference
 * @dev_pg0: Device Page 0 data describing the device
 *
 * If a target with the page's persistent ID already exists it is
 * refreshed in place; otherwise a new zeroed target is allocated,
 * populated and appended to the CAM target list.
 *
 * Return: 0 on success, -1 when allocation of a new target fails.
 */
int mpi3mr_create_device(struct mpi3mr_softc *sc,
    Mpi3DevicePage0_t *dev_pg0)
{
	struct mpi3mr_target *target = NULL;
	U16 per_id;

	per_id = dev_pg0->PersistentID;

	/* Look for an existing target under the target spin lock. */
	mtx_lock_spin(&sc->target_lock);
	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
		if (target->per_id == per_id) {
			target->state = MPI3MR_DEV_CREATED;
			break;
		}
	}
	mtx_unlock_spin(&sc->target_lock);

	if (target != NULL) {
		/* Known device: just refresh its attributes. */
		mpi3mr_update_device(sc, target, dev_pg0, true);
		return (0);
	}

	target = malloc(sizeof(*target), M_MPI3MR, M_NOWAIT | M_ZERO);
	if (target == NULL)
		return (-1);

	target->exposed_to_os = 0;
	mpi3mr_update_device(sc, target, dev_pg0, true);
	mtx_lock_spin(&sc->target_lock);
	TAILQ_INSERT_TAIL(&sc->cam_sc->tgt_list, target, tgt_next);
	target->state = MPI3MR_DEV_CREATED;
	mtx_unlock_spin(&sc->target_lock);

	return (0);
}
3464
3465 /**
3466 * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
3467 * @sc: Adapter instance reference
3468 * @drv_cmd: Internal command tracker
3469 *
 * Issues a target reset TM to the firmware from the device
 * removal TM pend list or retries the removal handshake sequence
 * based on the IOU control request IOC status.
3473 *
3474 * Return: Nothing
3475 */
static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_softc *sc,
    struct mpi3mr_drvr_cmd *drv_cmd)
{
	/* Recover the dev_rmhs_cmds[] slot index from the host tag. */
	U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;

	mpi3mr_dprint(sc, MPI3MR_EVENT,
	    "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
	    __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
	    drv_cmd->ioc_loginfo);
	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		/*
		 * Retry the whole handshake (TM followed by IOU control) a
		 * bounded number of times before giving up on this handle.
		 */
		if (drv_cmd->retry_count < MPI3MR_DEVRMHS_RETRYCOUNT) {
			drv_cmd->retry_count++;
			mpi3mr_dprint(sc, MPI3MR_EVENT,
			    "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
			    __func__, drv_cmd->dev_handle,
			    drv_cmd->retry_count);
			mpi3mr_dev_rmhs_send_tm(sc, drv_cmd->dev_handle,
			    drv_cmd, drv_cmd->iou_rc);
			return;
		}
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
	} else {
		mpi3mr_dprint(sc, MPI3MR_INFO,
		    "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		/* Handshake done: this handle is no longer removal-pending. */
		mpi3mr_clear_bit(drv_cmd->dev_handle, sc->removepend_bitmap);
	}

	/*
	 * Reuse this command tracker for the oldest postponed handshake,
	 * if any, instead of releasing it.
	 */
	if (!TAILQ_EMPTY(&sc->delayed_rmhs_list)) {
		delayed_dev_rmhs = TAILQ_FIRST(&sc->delayed_rmhs_list);
		drv_cmd->dev_handle = delayed_dev_rmhs->handle;
		drv_cmd->retry_count = 0;
		drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
		mpi3mr_dprint(sc, MPI3MR_EVENT,
		    "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		mpi3mr_dev_rmhs_send_tm(sc, drv_cmd->dev_handle, drv_cmd,
		    drv_cmd->iou_rc);
		TAILQ_REMOVE(&sc->delayed_rmhs_list, delayed_dev_rmhs, list);
		free(delayed_dev_rmhs, M_MPI3MR);
		return;
	}
	/* No pending work: return the tracker and its bitmap slot. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
}
3527
3528 /**
3529 * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
3530 * @sc: Adapter instance reference
3531 * @drv_cmd: Internal command tracker
3532 *
3533 * Issues a target reset TM to the firmware from the device
3534 * removal TM pend list or issue IO Unit control request as
3535 * part of device removal or hidden acknowledgment handshake.
3536 *
3537 * Return: Nothing
3538 */
static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_softc *sc,
    struct mpi3mr_drvr_cmd *drv_cmd)
{
	Mpi3IoUnitControlRequest_t iou_ctrl;
	/* Recover the dev_rmhs_cmds[] slot index from the host tag. */
	U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	Mpi3SCSITaskMgmtReply_t *tm_reply = NULL;
	int retval;

	/* The TM reply is only meaningful when the tracker says so. */
	if (drv_cmd->state & MPI3MR_CMD_REPLYVALID)
		tm_reply = (Mpi3SCSITaskMgmtReply_t *)drv_cmd->reply;

	if (tm_reply)
		printf(IOCNAME
		    "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
		    sc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
		    drv_cmd->ioc_loginfo,
		    le32toh(tm_reply->TerminationCount));

	printf(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
	    sc->name, drv_cmd->dev_handle, cmd_idx);

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));

	/*
	 * Second half of the handshake: reuse the same tracker for the
	 * IO Unit Control request; its completion goes to the IOU handler.
	 */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
	iou_ctrl.Operation = drv_cmd->iou_rc;
	iou_ctrl.Param16[0] = htole16(drv_cmd->dev_handle);
	iou_ctrl.HostTag = htole16(drv_cmd->host_tag);
	iou_ctrl.Function = MPI3_FUNCTION_IO_UNIT_CONTROL;

	retval = mpi3mr_submit_admin_cmd(sc, &iou_ctrl, sizeof(iou_ctrl));
	if (retval) {
		printf(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
		    sc->name);
		goto out_failed;
	}

	return;
out_failed:
	/* Could not post the request: release the tracker and its slot. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
}
3585
3586 /**
3587 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
3588 * @sc: Adapter instance reference
3589 * @handle: Device handle
3590 * @cmdparam: Internal command tracker
3591 * @iou_rc: IO Unit reason code
3592 *
3593 * Issues a target reset TM to the firmware or add it to a pend
3594 * list as part of device removal or hidden acknowledgment
3595 * handshake.
3596 *
3597 * Return: Nothing
3598 */
static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_softc *sc, U16 handle,
    struct mpi3mr_drvr_cmd *cmdparam, U8 iou_rc)
{
	Mpi3SCSITaskMgmtRequest_t tm_req;
	int retval = 0;
	U16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
	U8 retrycount = 5;
	struct mpi3mr_drvr_cmd *drv_cmd = cmdparam;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
	struct mpi3mr_target *tgtdev = NULL;

	/* Mark the target as undergoing removal before the TM goes out. */
	mtx_lock_spin(&sc->target_lock);
	TAILQ_FOREACH(tgtdev, &sc->cam_sc->tgt_list, tgt_next) {
		if ((tgtdev->dev_handle == handle) &&
		    (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE)) {
			tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED;
			break;
		}
	}
	mtx_unlock_spin(&sc->target_lock);

	/* Retry/delayed-list callers pass in their own tracker. */
	if (drv_cmd)
		goto issue_cmd;
	/* Otherwise claim a free dev_rmhs_cmds[] slot via the bitmap. */
	do {
		cmd_idx = mpi3mr_find_first_zero_bit(sc->devrem_bitmap,
		    MPI3MR_NUM_DEVRMCMD);
		if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
			if (!mpi3mr_test_and_set_bit(cmd_idx, sc->devrem_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_DEVRMCMD;
		}
	} while (retrycount--);

	/*
	 * No slot free: queue the handshake so the IOU completion handler
	 * picks it up when a tracker becomes available.
	 */
	if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
		delayed_dev_rmhs = malloc(sizeof(*delayed_dev_rmhs),M_MPI3MR,
		    M_ZERO|M_NOWAIT);

		if (!delayed_dev_rmhs)
			return;
		delayed_dev_rmhs->handle = handle;
		delayed_dev_rmhs->iou_rc = iou_rc;
		TAILQ_INSERT_TAIL(&(sc->delayed_rmhs_list), delayed_dev_rmhs, list);
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
		    __func__, handle);


		return;
	}
	drv_cmd = &sc->dev_rmhs_cmds[cmd_idx];

issue_cmd:
	/* Recompute the slot index from the tracker's own host tag. */
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	mpi3mr_dprint(sc, MPI3MR_EVENT,
	    "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
	    __func__, handle, cmd_idx);

	memset(&tm_req, 0, sizeof(tm_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Issue TM: Command is in use\n", __func__);
		goto out;
	}
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	/*
	 * First half of the handshake: a target reset TM whose completion
	 * chains into the IO Unit Control request.
	 */
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
	drv_cmd->dev_handle = handle;
	drv_cmd->iou_rc = iou_rc;
	tm_req.DevHandle = htole16(handle);
	tm_req.TaskType = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	tm_req.HostTag = htole16(drv_cmd->host_tag);
	tm_req.TaskHostTag = htole16(MPI3MR_HOSTTAG_INVALID);
	tm_req.Function = MPI3_FUNCTION_SCSI_TASK_MGMT;

	/* Flag the handle as removal-pending before posting the TM. */
	mpi3mr_set_bit(handle, sc->removepend_bitmap);
	retval = mpi3mr_submit_admin_cmd(sc, &tm_req, sizeof(tm_req));
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s :Issue DevRmHsTM: Admin Post failed\n",
		    __func__);
		goto out_failed;
	}
out:
	return;
out_failed:
	/* Could not post the TM: release the tracker and its slot. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
}
3687
3688 /**
3689 * mpi3mr_complete_evt_ack - Event ack request completion
3690 * @sc: Adapter instance reference
3691 * @drv_cmd: Internal command tracker
3692 *
3693 * This is the completion handler for non blocking event
3694 * acknowledgment sent to the firmware and this will issue any
3695 * pending event acknowledgment request.
3696 *
3697 * Return: Nothing
3698 */
mpi3mr_complete_evt_ack(struct mpi3mr_softc * sc,struct mpi3mr_drvr_cmd * drv_cmd)3699 static void mpi3mr_complete_evt_ack(struct mpi3mr_softc *sc,
3700 struct mpi3mr_drvr_cmd *drv_cmd)
3701 {
3702 U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
3703 struct delayed_evtack_node *delayed_evtack = NULL;
3704
3705 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
3706 mpi3mr_dprint(sc, MPI3MR_EVENT,
3707 "%s: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n", __func__,
3708 (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3709 drv_cmd->ioc_loginfo);
3710 }
3711
3712 if (!TAILQ_EMPTY(&sc->delayed_evtack_cmds_list)) {
3713 delayed_evtack = TAILQ_FIRST(&sc->delayed_evtack_cmds_list);
3714 mpi3mr_dprint(sc, MPI3MR_EVENT,
3715 "%s: processing delayed event ack for event %d\n",
3716 __func__, delayed_evtack->event);
3717 mpi3mr_send_evt_ack(sc, delayed_evtack->event, drv_cmd,
3718 delayed_evtack->event_ctx);
3719 TAILQ_REMOVE(&sc->delayed_evtack_cmds_list, delayed_evtack, list);
3720 free(delayed_evtack, M_MPI3MR);
3721 return;
3722 }
3723 drv_cmd->state = MPI3MR_CMD_NOTUSED;
3724 drv_cmd->callback = NULL;
3725 mpi3mr_clear_bit(cmd_idx, sc->evtack_cmds_bitmap);
3726 }
3727
/**
 * mpi3mr_send_evt_ack - Issue event acknowledgment request
 * @sc: Adapter instance reference
 * @event: MPI3 event id
 * @cmdparam: Internal command tracker
 * @event_ctx: Event context
 *
 * Issues an event acknowledgment request to the firmware if there
 * is a free command to send the event ack, else adds it to a pend
 * list so that it will be processed on completion of a prior
 * event acknowledgment.
 *
 * Return: Nothing
 */
static void mpi3mr_send_evt_ack(struct mpi3mr_softc *sc, U8 event,
    struct mpi3mr_drvr_cmd *cmdparam, U32 event_ctx)
{
	Mpi3EventAckRequest_t evtack_req;
	int retval = 0;
	U8 retrycount = 5;
	U16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
	struct mpi3mr_drvr_cmd *drv_cmd = cmdparam;
	struct delayed_evtack_node *delayed_evtack = NULL;

	/* A caller-supplied tracker (delayed-ack replay path) is reused
	 * directly without reserving a new slot. */
	if (drv_cmd)
		goto issue_cmd;
	/*
	 * Reserve a free event-ack tracker slot. Retry a few times since
	 * mpi3mr_test_and_set_bit() may lose a race for the slot that
	 * mpi3mr_find_first_zero_bit() reported as free.
	 */
	do {
		cmd_idx = mpi3mr_find_first_zero_bit(sc->evtack_cmds_bitmap,
		    MPI3MR_NUM_EVTACKCMD);
		if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
			if (!mpi3mr_test_and_set_bit(cmd_idx,
			    sc->evtack_cmds_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_EVTACKCMD;
		}
	} while (retrycount--);

	/*
	 * No tracker available: queue the ack on the delayed list; it will
	 * be issued from mpi3mr_complete_evt_ack() when a prior event ack
	 * completes. On allocation failure the ack is silently dropped.
	 */
	if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
		delayed_evtack = malloc(sizeof(*delayed_evtack), M_MPI3MR,
		    M_ZERO | M_NOWAIT);
		if (!delayed_evtack)
			return;
		delayed_evtack->event = event;
		delayed_evtack->event_ctx = event_ctx;
		TAILQ_INSERT_TAIL(&(sc->delayed_evtack_cmds_list), delayed_evtack, list);
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s : Event ack for event:%d is postponed\n",
		    __func__, event);
		return;
	}
	drv_cmd = &sc->evtack_cmds[cmd_idx];

issue_cmd:
	/* Recompute the index from the tracker's host tag; for the replay
	 * path the caller's tracker determines the index. */
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;

	memset(&evtack_req, 0, sizeof(evtack_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s: Command is in use\n", __func__);
		goto out;
	}
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	/* Completion is handled asynchronously via the callback. */
	drv_cmd->callback = mpi3mr_complete_evt_ack;
	evtack_req.HostTag = htole16(drv_cmd->host_tag);
	evtack_req.Function = MPI3_FUNCTION_EVENT_ACK;
	evtack_req.Event = event;
	evtack_req.EventContext = htole32(event_ctx);
	retval = mpi3mr_submit_admin_cmd(sc, &evtack_req,
	    sizeof(evtack_req));

	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Admin Post failed\n", __func__);
		goto out_failed;
	}
out:
	return;
out_failed:
	/* Post failed: return the tracker to the free pool. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	mpi3mr_clear_bit(cmd_idx, sc->evtack_cmds_bitmap);
}
3808
3809 /*
3810 * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
3811 * @sc: Adapter instance reference
3812 * @event_reply: Event data
3813 *
3814 * Checks for the reason code and based on that either block I/O
3815 * to device, or unblock I/O to the device, or start the device
3816 * removal handshake with reason as remove with the firmware for
3817 * PCIe devices.
3818 *
3819 * Return: Nothing
3820 */
static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_softc *sc,
    Mpi3EventNotificationReply_t *event_reply)
{
	Mpi3EventDataPcieTopologyChangeList_t *topo_change =
	    (Mpi3EventDataPcieTopologyChangeList_t *)event_reply->EventData;
	struct mpi3mr_target *target;
	U16 dev_handle;
	U8 rc;
	int entry;

	/* Walk every port entry and act on its status code. */
	for (entry = 0; entry < topo_change->NumEntries; entry++) {
		dev_handle =
		    le16toh(topo_change->PortEntry[entry].AttachedDevHandle);
		if (dev_handle == 0)
			continue;
		rc = topo_change->PortEntry[entry].PortStatus;
		target = mpi3mr_find_target_by_dev_handle(sc->cam_sc,
		    dev_handle);
		switch (rc) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			/* Device is gone: mark removed, lift any pending
			 * I/O block and start the removal handshake. */
			if (target) {
				target->dev_removed = 1;
				target->dev_removedelay = 0;
				mpi3mr_atomic_set(&target->block_io, 0);
			}
			mpi3mr_dev_rmhs_send_tm(sc, dev_handle, NULL,
			    MPI3_CTRL_OP_REMOVE_DEVICE);
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
			/* Device may come back: block I/O for now. */
			if (target) {
				target->dev_removedelay = 1;
				mpi3mr_atomic_inc(&target->block_io);
			}
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
			/* Device returned: undo a prior delayed block. */
			if (target && target->dev_removedelay) {
				target->dev_removedelay = 0;
				if (mpi3mr_atomic_read(&target->block_io) > 0)
					mpi3mr_atomic_dec(&target->block_io);
			}
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
		default:
			break;
		}
	}
}
3867
3868 /**
3869 * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
3870 * @sc: Adapter instance reference
3871 * @event_reply: Event data
3872 *
3873 * Checks for the reason code and based on that either block I/O
3874 * to device, or unblock I/O to the device, or start the device
3875 * removal handshake with reason as remove with the firmware for
3876 * SAS/SATA devices.
3877 *
3878 * Return: Nothing
3879 */
static void mpi3mr_sastopochg_evt_th(struct mpi3mr_softc *sc,
    Mpi3EventNotificationReply_t *event_reply)
{
	Mpi3EventDataSasTopologyChangeList_t *topo_evt =
	    (Mpi3EventDataSasTopologyChangeList_t *)event_reply->EventData;
	int i;
	U16 handle;
	U8 reason_code;
	struct mpi3mr_target *tgtdev = NULL;

	/* Walk every phy entry and act on its reason code. */
	for (i = 0; i < topo_evt->NumEntries; i++) {
		handle = le16toh(topo_evt->PhyEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = topo_evt->PhyEntry[i].Status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			/* Device is gone: mark removed, lift any pending
			 * I/O block and start the removal handshake. */
			if (tgtdev) {
				tgtdev->dev_removed = 1;
				tgtdev->dev_removedelay = 0;
				mpi3mr_atomic_set(&tgtdev->block_io, 0);
			}
			mpi3mr_dev_rmhs_send_tm(sc, handle, NULL,
			    MPI3_CTRL_OP_REMOVE_DEVICE);
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			/* Device may come back: block I/O for now. */
			if (tgtdev) {
				tgtdev->dev_removedelay = 1;
				mpi3mr_atomic_inc(&tgtdev->block_io);
			}
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
			/* Device returned: undo a prior delayed block. */
			if (tgtdev &&
			    tgtdev->dev_removedelay) {
				tgtdev->dev_removedelay = 0;
				if (mpi3mr_atomic_read(&tgtdev->block_io) > 0)
					mpi3mr_atomic_dec(&tgtdev->block_io);
			}
			/*
			 * Fix: an explicit break was missing here; control
			 * silently fell through into the no-op cases below.
			 * Harmless today, but fragile and inconsistent with
			 * mpi3mr_pcietopochg_evt_th().
			 */
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
		default:
			break;
		}
	}

}
3927 /**
3928 * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
3929 * @sc: Adapter instance reference
3930 * @event_reply: Event data
3931 *
3932 * Checks for the reason code and based on that either block I/O
3933 * to device, or unblock I/O to the device, or start the device
3934 * removal handshake with reason as remove/hide acknowledgment
3935 * with the firmware.
3936 *
3937 * Return: Nothing
3938 */
static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_softc *sc,
    Mpi3EventNotificationReply_t *event_reply)
{
	Mpi3EventDataDeviceStatusChange_t *evtdata =
	    (Mpi3EventDataDeviceStatusChange_t *)event_reply->EventData;
	struct mpi3mr_target *tgt;
	U16 dev_handle;
	U8 block_io = 0, unblock_io = 0;
	U8 hide_dev = 0, unhide_dev = 0;
	U8 mark_removed = 0, start_removal = 0;

	dev_handle = le16toh(evtdata->DevHandle);

	/* Translate the firmware reason code into a set of actions. */
	switch (evtdata->ReasonCode) {
	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
		block_io = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		mark_removed = 1;
		hide_dev = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
		unhide_dev = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		mark_removed = 1;
		start_removal = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
		unblock_io = 1;
		break;
	default:
		break;
	}

	tgt = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);

	if (tgt == NULL) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s :target with dev_handle:0x%x not found\n",
		    __func__, dev_handle);
		return;
	}

	/* Apply the chosen actions to the target's state. */
	if (block_io)
		mpi3mr_atomic_inc(&tgt->block_io);

	if (hide_dev)
		tgt->is_hidden = hide_dev;

	if (unhide_dev) {
		tgt->is_hidden = 0;
		tgt->dev_removed = 0;
	}

	if (mark_removed)
		tgt->dev_removed = 1;

	if (unblock_io) {
		if (mpi3mr_atomic_read(&tgt->block_io) > 0)
			mpi3mr_atomic_dec(&tgt->block_io);
	}

	/* Kick off the firmware handshake last: removal or hide-ack. */
	if (start_removal) {
		mpi3mr_dev_rmhs_send_tm(sc, dev_handle, NULL,
		    MPI3_CTRL_OP_REMOVE_DEVICE);
	}
	if (hide_dev)
		mpi3mr_dev_rmhs_send_tm(sc, dev_handle, NULL,
		    MPI3_CTRL_OP_HIDDEN_ACK);
}
4009
4010 /**
4011 * mpi3mr_preparereset_evt_th - Prepareforreset evt tophalf
4012 * @sc: Adapter instance reference
4013 * @event_reply: Event data
4014 *
4015 * Blocks and unblocks host level I/O based on the reason code
4016 *
4017 * Return: Nothing
4018 */
static void mpi3mr_preparereset_evt_th(struct mpi3mr_softc *sc,
    Mpi3EventNotificationReply_t *event_reply)
{
	Mpi3EventDataPrepareForReset_t *evtdata =
	    (Mpi3EventDataPrepareForReset_t *)event_reply->EventData;

	if (evtdata->ReasonCode == MPI3_EVENT_PREPARE_RESET_RC_START) {
		/* Fix: corrected "Recieved" typo in the log messages. */
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Received PrepForReset Event with RC=START\n",
		    __func__);
		/* Already preparing; don't restart the timeout counter. */
		if (sc->prepare_for_reset)
			return;
		sc->prepare_for_reset = 1;
		sc->prepare_for_reset_timeout_counter = 0;
	} else if (evtdata->ReasonCode == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Received PrepForReset Event with RC=ABORT\n",
		    __func__);
		sc->prepare_for_reset = 0;
		sc->prepare_for_reset_timeout_counter = 0;
	}
	/* This event is acked inline here (top half), not via the bottom
	 * half; mpi3mr_process_events() clears ack_req for it. */
	if ((event_reply->MsgFlags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
		mpi3mr_send_evt_ack(sc, event_reply->Event, NULL,
		    le32toh(event_reply->EventContext));
}
4043
4044 /**
4045 * mpi3mr_energypackchg_evt_th - Energypackchange evt tophalf
4046 * @sc: Adapter instance reference
4047 * @event_reply: Event data
4048 *
4049 * Identifies the new shutdown timeout value and update.
4050 *
4051 * Return: Nothing
4052 */
static void mpi3mr_energypackchg_evt_th(struct mpi3mr_softc *sc,
    Mpi3EventNotificationReply_t *event_reply)
{
	Mpi3EventDataEnergyPackChange_t *evtdata =
	    (Mpi3EventDataEnergyPackChange_t *)event_reply->EventData;
	U16 shutdown_timeout = le16toh(evtdata->ShutdownTimeout);

	/*
	 * Fix: shutdown_timeout is unsigned, so the previous "<= 0" test
	 * was a tautological comparison; only zero is actually invalid.
	 */
	if (shutdown_timeout == 0) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "%s :Invalid Shutdown Timeout received = %d\n",
		    __func__, shutdown_timeout);
		return;
	}

	mpi3mr_dprint(sc, MPI3MR_EVENT,
	    "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
	    __func__, sc->facts.shutdown_timeout, shutdown_timeout);
	sc->facts.shutdown_timeout = shutdown_timeout;
}
4072
/**
 * mpi3mr_cablemgmt_evt_th - Cable mgmt evt tophalf
 * @sc: Adapter instance reference
 * @event_reply: Event data
 *
 * Displays cable management event details.
 *
 * Return: Nothing
 */
static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_softc *sc,
    Mpi3EventNotificationReply_t *event_reply)
{
	Mpi3EventDataCableManagement_t *cable_evt =
	    (Mpi3EventDataCableManagement_t *)event_reply->EventData;

	/* Purely informational: log the cable condition reported by FW. */
	switch (cable_evt->Status) {
	case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
		mpi3mr_dprint(sc, MPI3MR_INFO, "An active cable with ReceptacleID %d cannot be powered.\n"
		    "Devices connected to this cable are not detected.\n"
		    "This cable requires %d mW of power.\n",
		    cable_evt->ReceptacleID,
		    le32toh(cable_evt->ActiveCablePowerRequirement));
		break;
	case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
		mpi3mr_dprint(sc, MPI3MR_INFO, "A cable with ReceptacleID %d is not running at optimal speed\n",
		    cable_evt->ReceptacleID);
		break;
	default:
		break;
	}
}
4108
/**
 * mpi3mr_process_events - Event's top-half handler
 * @sc: Adapter instance reference
 * @event_reply: Event data
 *
 * Top half of event processing.
 *
 * Return: Nothing
 */
static void mpi3mr_process_events(struct mpi3mr_softc *sc,
    uintptr_t data, Mpi3EventNotificationReply_t *event_reply)
{
	U16 evt_type;
	bool ack_req = 0, process_evt_bh = 0;
	struct mpi3mr_fw_event_work *fw_event;
	U16 sz;

	/* Drop all events once the adapter is shutting down. */
	if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN)
		goto out;

	/* Does the firmware expect an explicit acknowledgment? */
	if ((event_reply->MsgFlags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
		ack_req = 1;

	evt_type = event_reply->Event;

	/*
	 * Top-half dispatch: do the minimal, non-sleeping work here and
	 * decide whether the event needs bottom-half processing.
	 */
	switch (evt_type) {
	case MPI3_EVENT_DEVICE_ADDED:
	{
		Mpi3DevicePage0_t *dev_pg0 =
		    (Mpi3DevicePage0_t *) event_reply->EventData;
		/* Defer to the bottom half only if device creation worked. */
		if (mpi3mr_create_device(sc, dev_pg0))
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			    "%s :Failed to add device in the device add event\n",
			    __func__);
		else
			process_evt_bh = 1;
		break;
	}

	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		process_evt_bh = 1;
		mpi3mr_devstatuschg_evt_th(sc, event_reply);
		break;
	}
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		process_evt_bh = 1;
		mpi3mr_sastopochg_evt_th(sc, event_reply);
		break;
	}
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
	{
		process_evt_bh = 1;
		mpi3mr_pcietopochg_evt_th(sc, event_reply);
		break;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
	{
		/* Acked inside the top-half handler itself. */
		mpi3mr_preparereset_evt_th(sc, event_reply);
		ack_req = 0;
		break;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	case MPI3_EVENT_LOG_DATA:
	{
		process_evt_bh = 1;
		break;
	}
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
	{
		mpi3mr_energypackchg_evt_th(sc, event_reply);
		break;
	}
	case MPI3_EVENT_CABLE_MGMT:
	{
		mpi3mr_cablemgmt_evt_th(sc, event_reply);
		break;
	}

	/* Events that need neither top- nor bottom-half work. */
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
	case MPI3_EVENT_SAS_DISCOVERY:
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
	case MPI3_EVENT_PCIE_ENUMERATION:
		break;
	default:
		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :Event 0x%02x is not handled by driver\n",
		    __func__, evt_type);
		break;
	}

	/*
	 * Queue a work item for the bottom half if the event needs further
	 * processing and/or an acknowledgment to the firmware.
	 */
	if (process_evt_bh || ack_req) {
		fw_event = malloc(sizeof(struct mpi3mr_fw_event_work), M_MPI3MR,
		    M_ZERO|M_NOWAIT);

		if (!fw_event) {
			printf("%s: allocate failed for fw_event\n", __func__);
			return;
		}

		/* EventDataLength is in 4-byte (dword) units. */
		sz = le16toh(event_reply->EventDataLength) * 4;
		fw_event->event_data = malloc(sz, M_MPI3MR, M_ZERO|M_NOWAIT);

		if (!fw_event->event_data) {
			printf("%s: allocate failed for event_data\n", __func__);
			free(fw_event, M_MPI3MR);
			return;
		}

		bcopy(event_reply->EventData, fw_event->event_data, sz);
		fw_event->event = event_reply->Event;
		/* Track topology/enclosure events for mapping bookkeeping. */
		if ((event_reply->Event == MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    event_reply->Event == MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
		    event_reply->Event == MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE ) &&
		    sc->track_mapping_events)
			sc->pending_map_events++;

		/*
		 * Events should be processed after Port enable is completed.
		 */
		if ((event_reply->Event == MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    event_reply->Event == MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ) &&
		    !(sc->mpi3mr_flags & MPI3MR_FLAGS_PORT_ENABLE_DONE))
			mpi3mr_startup_increment(sc->cam_sc);

		fw_event->send_ack = ack_req;
		fw_event->event_context = le32toh(event_reply->EventContext);
		fw_event->event_data_size = sz;
		fw_event->process_event = process_evt_bh;

		/* Hand the work item to the event taskqueue. */
		mtx_lock(&sc->fwevt_lock);
		TAILQ_INSERT_TAIL(&sc->cam_sc->ev_queue, fw_event, ev_link);
		taskqueue_enqueue(sc->cam_sc->ev_tq, &sc->cam_sc->ev_task);
		mtx_unlock(&sc->fwevt_lock);

	}
out:
	return;
}
4250
/*
 * mpi3mr_handle_events - Entry point for event notification replies:
 * records the IOC change count, logs the event, and forwards it to the
 * top-half dispatcher.
 */
static void mpi3mr_handle_events(struct mpi3mr_softc *sc, uintptr_t data,
    Mpi3DefaultReply_t *def_reply)
{
	Mpi3EventNotificationReply_t *event_reply =
	    (Mpi3EventNotificationReply_t *)def_reply;

	/* NOTE(review): no le16toh() applied to IOCChangeCount here, unlike
	 * the EventContext handling in mpi3mr_process_events() — confirm
	 * whether a byte swap is needed on big-endian hosts. */
	sc->change_count = event_reply->IOCChangeCount;
	mpi3mr_display_event_data(sc, event_reply);

	mpi3mr_process_events(sc, data, event_reply);
}
4262
/*
 * mpi3mr_process_admin_reply_desc - Decode one admin reply descriptor,
 * extract the host tag / IOC status, and route the completion to the
 * matching internal command tracker (init, ioctl, TM, PEL, device-removal,
 * event-ack) or to the event handler for unsolicited notifications.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_softc *sc,
    Mpi3DefaultReplyDescriptor_t *reply_desc, U64 *reply_dma)
{
	U16 reply_desc_type, host_tag = 0, idx;
	U16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	U32 ioc_loginfo = 0;
	Mpi3StatusReplyDescriptor_t *status_desc;
	Mpi3AddressReplyDescriptor_t *addr_desc;
	Mpi3SuccessReplyDescriptor_t *success_desc;
	Mpi3DefaultReply_t *def_reply = NULL;
	struct mpi3mr_drvr_cmd *cmdptr = NULL;
	Mpi3SCSIIOReply_t *scsi_reply;
	U8 *sense_buf = NULL;

	/* reply_dma is reported back to the caller so it can repost the
	 * reply frame; zero means no frame was consumed. */
	*reply_dma = 0;
	reply_desc_type = reply_desc->ReplyFlags &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		/* Inline status descriptor: no separate reply frame. */
		status_desc = (Mpi3StatusReplyDescriptor_t *)reply_desc;
		host_tag = status_desc->HostTag;
		ioc_status = status_desc->IOCStatus;
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = status_desc->IOCLogInfo;
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		/* Full reply frame addressed by DMA; look up its virtual
		 * address to pull status and (for SCSI IO) sense data. */
		addr_desc = (Mpi3AddressReplyDescriptor_t *)reply_desc;
		*reply_dma = addr_desc->ReplyFrameAddress;
		def_reply = mpi3mr_get_reply_virt_addr(sc, *reply_dma);
		if (def_reply == NULL)
			goto out;
		host_tag = def_reply->HostTag;
		ioc_status = def_reply->IOCStatus;
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = def_reply->IOCLogInfo;
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		if (def_reply->Function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (Mpi3SCSIIOReply_t *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(sc,
			    scsi_reply->SenseDataBufferAddress);
		}
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		/* Success descriptor: ioc_status stays SUCCESS. */
		success_desc = (Mpi3SuccessReplyDescriptor_t *)reply_desc;
		host_tag = success_desc->HostTag;
		break;
	default:
		break;
	}
	/* Map the host tag to the internal command tracker it belongs to. */
	switch (host_tag) {
	case MPI3MR_HOSTTAG_INITCMDS:
		cmdptr = &sc->init_cmds;
		break;
	case MPI3MR_HOSTTAG_IOCTLCMDS:
		cmdptr = &sc->ioctl_cmds;
		break;
	case MPI3MR_HOSTTAG_TMS:
		cmdptr = &sc->host_tm_cmds;
		wakeup((void *)&sc->tm_chan);
		break;
	case MPI3MR_HOSTTAG_PELABORT:
		cmdptr = &sc->pel_abort_cmd;
		break;
	case MPI3MR_HOSTTAG_PELWAIT:
		cmdptr = &sc->pel_cmds;
		break;
	case MPI3MR_HOSTTAG_INVALID:
		/* Unsolicited reply: only event notifications are expected;
		 * cmdptr stays NULL. */
		if (def_reply && def_reply->Function ==
		    MPI3_FUNCTION_EVENT_NOTIFICATION)
			mpi3mr_handle_events(sc, *reply_dma ,def_reply);
		/* FALLTHROUGH */
	default:
		break;
	}

	/* Ranged tags: device-removal handshake trackers. */
	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX ) {
		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
		cmdptr = &sc->dev_rmhs_cmds[idx];
	}

	/* Ranged tags: event-acknowledgment trackers. */
	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
		cmdptr = &sc->evtack_cmds[idx];
	}

	/* Complete the tracker: stash status/reply/sense, then wake the
	 * sleeping waiter or invoke the async callback. */
	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			cmdptr->ioc_status = ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLYVALID;
				memcpy((U8 *)cmdptr->reply, (U8 *)def_reply,
				    sc->reply_sz);
			}
			if (sense_buf && cmdptr->sensebuf) {
				cmdptr->is_senseprst = 1;
				memcpy(cmdptr->sensebuf, sense_buf,
				    MPI3MR_SENSEBUF_SZ);
			}
			if (cmdptr->is_waiting) {
				complete(&cmdptr->completion);
				cmdptr->is_waiting = 0;
			} else if (cmdptr->callback)
				cmdptr->callback(sc, cmdptr);
		}
	}
out:
	/* sense_buf non-NULL implies scsi_reply was set above; return the
	 * sense buffer to the firmware's free pool. */
	if (sense_buf != NULL)
		mpi3mr_repost_sense_buf(sc,
		    scsi_reply->SenseDataBufferAddress);
	return;
}
4381
4382 /*
4383 * mpi3mr_complete_admin_cmd: ISR routine for admin commands
4384 * @sc: Adapter's soft instance
4385 *
4386 * This function processes admin command completions.
4387 */
static int mpi3mr_complete_admin_cmd(struct mpi3mr_softc *sc)
{
	U32 exp_phase = sc->admin_reply_ephase;
	U32 adm_reply_ci = sc->admin_reply_ci;
	U32 num_adm_reply = 0;
	U64 reply_dma = 0;
	Mpi3DefaultReplyDescriptor_t *reply_desc;

	/*
	 * Single-consumer guard: only one context may drain the admin
	 * reply queue at a time. If another is already in, bail out.
	 */
	mtx_lock_spin(&sc->admin_reply_lock);
	if (sc->admin_in_use == false) {
		sc->admin_in_use = true;
		mtx_unlock_spin(&sc->admin_reply_lock);
	} else {
		mtx_unlock_spin(&sc->admin_reply_lock);
		return 0;
	}

	reply_desc = (Mpi3DefaultReplyDescriptor_t *)sc->admin_reply +
	    adm_reply_ci;

	/* Phase bit mismatch means the firmware has not produced a new
	 * descriptor at the consumer index yet. */
	if ((reply_desc->ReplyFlags &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		mtx_lock_spin(&sc->admin_reply_lock);
		sc->admin_in_use = false;
		mtx_unlock_spin(&sc->admin_reply_lock);
		return 0;
	}

	/* Drain every valid descriptor, reposting consumed reply frames. */
	do {
		sc->admin_req_ci = reply_desc->RequestQueueCI;
		mpi3mr_process_admin_reply_desc(sc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(sc, reply_dma);
		num_adm_reply++;
		/* Wrap the consumer index and flip the expected phase bit
		 * each time the circular queue wraps. */
		if (++adm_reply_ci == sc->num_admin_replies) {
			adm_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (Mpi3DefaultReplyDescriptor_t *)sc->admin_reply +
		    adm_reply_ci;
		if ((reply_desc->ReplyFlags &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
	} while (1);

	/* Publish the new consumer index to the hardware and drop the
	 * in-use guard. */
	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET, adm_reply_ci);
	sc->admin_reply_ci = adm_reply_ci;
	sc->admin_reply_ephase = exp_phase;
	mtx_lock_spin(&sc->admin_reply_lock);
	sc->admin_in_use = false;
	mtx_unlock_spin(&sc->admin_reply_lock);
	return num_adm_reply;
}
4442
4443 static void
mpi3mr_cmd_done(struct mpi3mr_softc * sc,struct mpi3mr_cmd * cmd)4444 mpi3mr_cmd_done(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd)
4445 {
4446 mpi3mr_unmap_request(sc, cmd);
4447
4448 mtx_lock(&sc->mpi3mr_mtx);
4449 if (cmd->callout_owner) {
4450 callout_stop(&cmd->callout);
4451 cmd->callout_owner = false;
4452 }
4453
4454 if (sc->unrecoverable)
4455 mpi3mr_set_ccbstatus(cmd->ccb, CAM_DEV_NOT_THERE);
4456
4457 xpt_done(cmd->ccb);
4458 cmd->ccb = NULL;
4459 mtx_unlock(&sc->mpi3mr_mtx);
4460 mpi3mr_release_command(cmd);
4461 }
4462
mpi3mr_process_op_reply_desc(struct mpi3mr_softc * sc,Mpi3DefaultReplyDescriptor_t * reply_desc,U64 * reply_dma)4463 void mpi3mr_process_op_reply_desc(struct mpi3mr_softc *sc,
4464 Mpi3DefaultReplyDescriptor_t *reply_desc, U64 *reply_dma)
4465 {
4466 U16 reply_desc_type, host_tag = 0;
4467 U16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
4468 U32 ioc_loginfo = 0;
4469 Mpi3StatusReplyDescriptor_t *status_desc = NULL;
4470 Mpi3AddressReplyDescriptor_t *addr_desc = NULL;
4471 Mpi3SuccessReplyDescriptor_t *success_desc = NULL;
4472 Mpi3SCSIIOReply_t *scsi_reply = NULL;
4473 U8 *sense_buf = NULL;
4474 U8 scsi_state = 0, scsi_status = 0, sense_state = 0;
4475 U32 xfer_count = 0, sense_count =0, resp_data = 0;
4476 struct mpi3mr_cmd *cm = NULL;
4477 union ccb *ccb;
4478 struct ccb_scsiio *csio;
4479 struct mpi3mr_cam_softc *cam_sc;
4480 U32 target_id;
4481 U8 *scsi_cdb;
4482 struct mpi3mr_target *target = NULL;
4483 U32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
4484 struct mpi3mr_throttle_group_info *tg = NULL;
4485 U8 throttle_enabled_dev = 0;
4486 static int ratelimit;
4487
4488 *reply_dma = 0;
4489 reply_desc_type = reply_desc->ReplyFlags &
4490 MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
4491 switch (reply_desc_type) {
4492 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
4493 status_desc = (Mpi3StatusReplyDescriptor_t *)reply_desc;
4494 host_tag = status_desc->HostTag;
4495 ioc_status = status_desc->IOCStatus;
4496 if (ioc_status &
4497 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
4498 ioc_loginfo = status_desc->IOCLogInfo;
4499 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
4500 break;
4501 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
4502 addr_desc = (Mpi3AddressReplyDescriptor_t *)reply_desc;
4503 *reply_dma = addr_desc->ReplyFrameAddress;
4504 scsi_reply = mpi3mr_get_reply_virt_addr(sc,
4505 *reply_dma);
4506 if (scsi_reply == NULL) {
4507 mpi3mr_dprint(sc, MPI3MR_ERROR, "scsi_reply is NULL, "
4508 "this shouldn't happen, reply_desc: %p\n",
4509 reply_desc);
4510 goto out;
4511 }
4512
4513 host_tag = scsi_reply->HostTag;
4514 ioc_status = scsi_reply->IOCStatus;
4515 scsi_status = scsi_reply->SCSIStatus;
4516 scsi_state = scsi_reply->SCSIState;
4517 sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
4518 xfer_count = scsi_reply->TransferCount;
4519 sense_count = scsi_reply->SenseCount;
4520 resp_data = scsi_reply->ResponseData;
4521 sense_buf = mpi3mr_get_sensebuf_virt_addr(sc,
4522 scsi_reply->SenseDataBufferAddress);
4523 if (ioc_status &
4524 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
4525 ioc_loginfo = scsi_reply->IOCLogInfo;
4526 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
4527 if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
4528 mpi3mr_dprint(sc, MPI3MR_ERROR, "Ran out of sense buffers\n");
4529
4530 break;
4531 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
4532 success_desc = (Mpi3SuccessReplyDescriptor_t *)reply_desc;
4533 host_tag = success_desc->HostTag;
4534
4535 default:
4536 break;
4537 }
4538
4539 cm = sc->cmd_list[host_tag];
4540
4541 if (cm->state == MPI3MR_CMD_STATE_FREE)
4542 goto out;
4543
4544 cam_sc = sc->cam_sc;
4545 ccb = cm->ccb;
4546 csio = &ccb->csio;
4547 target_id = csio->ccb_h.target_id;
4548
4549 scsi_cdb = scsiio_cdb_ptr(csio);
4550
4551 target = mpi3mr_find_target_by_per_id(cam_sc, target_id);
4552 if (sc->iot_enable) {
4553 data_len_blks = csio->dxfer_len >> 9;
4554
4555 if (target) {
4556 tg = target->throttle_group;
4557 throttle_enabled_dev =
4558 target->io_throttle_enabled;
4559 }
4560
4561 if ((data_len_blks >= sc->io_throttle_data_length) &&
4562 throttle_enabled_dev) {
4563 mpi3mr_atomic_sub(&sc->pend_large_data_sz, data_len_blks);
4564 ioc_pend_data_len = mpi3mr_atomic_read(
4565 &sc->pend_large_data_sz);
4566 if (tg) {
4567 mpi3mr_atomic_sub(&tg->pend_large_data_sz,
4568 data_len_blks);
4569 tg_pend_data_len = mpi3mr_atomic_read(&tg->pend_large_data_sz);
4570 if (ratelimit % 1000) {
4571 mpi3mr_dprint(sc, MPI3MR_IOT,
4572 "large vd_io completion persist_id(%d), handle(0x%04x), data_len(%d),"
4573 "ioc_pending(%d), tg_pending(%d), ioc_low(%d), tg_low(%d)\n",
4574 target->per_id,
4575 target->dev_handle,
4576 data_len_blks, ioc_pend_data_len,
4577 tg_pend_data_len,
4578 sc->io_throttle_low,
4579 tg->low);
4580 ratelimit++;
4581 }
4582 if (tg->io_divert && ((ioc_pend_data_len <=
4583 sc->io_throttle_low) &&
4584 (tg_pend_data_len <= tg->low))) {
4585 tg->io_divert = 0;
4586 mpi3mr_dprint(sc, MPI3MR_IOT,
4587 "VD: Coming out of divert perst_id(%d) tg_id(%d)\n",
4588 target->per_id, tg->id);
4589 mpi3mr_set_io_divert_for_all_vd_in_tg(
4590 sc, tg, 0);
4591 }
4592 } else {
4593 if (ratelimit % 1000) {
4594 mpi3mr_dprint(sc, MPI3MR_IOT,
4595 "large pd_io completion persist_id(%d), handle(0x%04x), data_len(%d), ioc_pending(%d), ioc_low(%d)\n",
4596 target->per_id,
4597 target->dev_handle,
4598 data_len_blks, ioc_pend_data_len,
4599 sc->io_throttle_low);
4600 ratelimit++;
4601 }
4602
4603 if (ioc_pend_data_len <= sc->io_throttle_low) {
4604 target->io_divert = 0;
4605 mpi3mr_dprint(sc, MPI3MR_IOT,
4606 "PD: Coming out of divert perst_id(%d)\n",
4607 target->per_id);
4608 }
4609 }
4610
4611 } else if (target->io_divert) {
4612 ioc_pend_data_len = mpi3mr_atomic_read(&sc->pend_large_data_sz);
4613 if (!tg) {
4614 if (ratelimit % 1000) {
4615 mpi3mr_dprint(sc, MPI3MR_IOT,
4616 "pd_io completion persist_id(%d), handle(0x%04x), data_len(%d), ioc_pending(%d), ioc_low(%d)\n",
4617 target->per_id,
4618 target->dev_handle,
4619 data_len_blks, ioc_pend_data_len,
4620 sc->io_throttle_low);
4621 ratelimit++;
4622 }
4623
4624 if ( ioc_pend_data_len <= sc->io_throttle_low) {
4625 mpi3mr_dprint(sc, MPI3MR_IOT,
4626 "PD: Coming out of divert perst_id(%d)\n",
4627 target->per_id);
4628 target->io_divert = 0;
4629 }
4630
4631 } else if (ioc_pend_data_len <= sc->io_throttle_low) {
4632 tg_pend_data_len = mpi3mr_atomic_read(&tg->pend_large_data_sz);
4633 if (ratelimit % 1000) {
4634 mpi3mr_dprint(sc, MPI3MR_IOT,
4635 "vd_io completion persist_id(%d), handle(0x%04x), data_len(%d),"
4636 "ioc_pending(%d), tg_pending(%d), ioc_low(%d), tg_low(%d)\n",
4637 target->per_id,
4638 target->dev_handle,
4639 data_len_blks, ioc_pend_data_len,
4640 tg_pend_data_len,
4641 sc->io_throttle_low,
4642 tg->low);
4643 ratelimit++;
4644 }
4645 if (tg->io_divert && (tg_pend_data_len <= tg->low)) {
4646 tg->io_divert = 0;
4647 mpi3mr_dprint(sc, MPI3MR_IOT,
4648 "VD: Coming out of divert perst_id(%d) tg_id(%d)\n",
4649 target->per_id, tg->id);
4650 mpi3mr_set_io_divert_for_all_vd_in_tg(
4651 sc, tg, 0);
4652 }
4653
4654 }
4655 }
4656 }
4657
4658 if (success_desc) {
4659 mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
4660 goto out_success;
4661 }
4662
4663 if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN
4664 && xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
4665 scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
4666 scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
4667 ioc_status = MPI3_IOCSTATUS_SUCCESS;
4668
4669 if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count
4670 && sense_buf) {
4671 int sense_len, returned_sense_len;
4672
4673 returned_sense_len = min(le32toh(sense_count),
4674 sizeof(struct scsi_sense_data));
4675 if (returned_sense_len < csio->sense_len)
4676 csio->sense_resid = csio->sense_len -
4677 returned_sense_len;
4678 else
4679 csio->sense_resid = 0;
4680
4681 sense_len = min(returned_sense_len,
4682 csio->sense_len - csio->sense_resid);
4683 bzero(&csio->sense_data, sizeof(csio->sense_data));
4684 bcopy(sense_buf, &csio->sense_data, sense_len);
4685 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
4686 }
4687
4688 switch (ioc_status) {
4689 case MPI3_IOCSTATUS_BUSY:
4690 case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
4691 mpi3mr_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
4692 break;
4693 case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
4694 /*
4695 * If devinfo is 0 this will be a volume. In that case don't
4696 * tell CAM that the volume is not there. We want volumes to
4697 * be enumerated until they are deleted/removed, not just
4698 * failed.
4699 */
4700 if (cm->targ->devinfo == 0)
4701 mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
4702 else
4703 mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
4704 break;
4705 case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
4706 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
4707 case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
4708 mpi3mr_set_ccbstatus(ccb, CAM_SCSI_BUSY);
4709 mpi3mr_dprint(sc, MPI3MR_TRACE,
4710 "func: %s line:%d tgt %u Hosttag %u loginfo %x\n",
4711 __func__, __LINE__,
4712 target_id, cm->hosttag,
4713 le32toh(scsi_reply->IOCLogInfo));
4714 mpi3mr_dprint(sc, MPI3MR_TRACE,
4715 "SCSIStatus %x SCSIState %x xfercount %u\n",
4716 scsi_reply->SCSIStatus, scsi_reply->SCSIState,
4717 le32toh(xfer_count));
4718 break;
4719 case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
4720 /* resid is ignored for this condition */
4721 csio->resid = 0;
4722 mpi3mr_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
4723 break;
4724 case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
4725 csio->resid = cm->length - le32toh(xfer_count);
4726 case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
4727 case MPI3_IOCSTATUS_SUCCESS:
4728 if ((scsi_reply->IOCStatus & MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK) ==
4729 MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR)
4730 mpi3mr_dprint(sc, MPI3MR_XINFO, "func: %s line: %d recovered error\n", __func__, __LINE__);
4731
4732 /* Completion failed at the transport level. */
4733 if (scsi_reply->SCSIState & (MPI3_SCSI_STATE_NO_SCSI_STATUS |
4734 MPI3_SCSI_STATE_TERMINATED)) {
4735 mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
4736 break;
4737 }
4738
4739 /* In a modern packetized environment, an autosense failure
4740 * implies that there's not much else that can be done to
4741 * recover the command.
4742 */
4743 if (scsi_reply->SCSIState & MPI3_SCSI_STATE_SENSE_VALID) {
4744 mpi3mr_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
4745 break;
4746 }
4747
4748 /*
4749 * Intentionally override the normal SCSI status reporting
4750 * for these two cases. These are likely to happen in a
4751 * multi-initiator environment, and we want to make sure that
4752 * CAM retries these commands rather than fail them.
4753 */
4754 if ((scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_COMMAND_TERMINATED) ||
4755 (scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_TASK_ABORTED)) {
4756 mpi3mr_set_ccbstatus(ccb, CAM_REQ_ABORTED);
4757 break;
4758 }
4759
4760 /* Handle normal status and sense */
4761 csio->scsi_status = scsi_reply->SCSIStatus;
4762 if (scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_GOOD)
4763 mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
4764 else
4765 mpi3mr_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
4766
4767 if (scsi_reply->SCSIState & MPI3_SCSI_STATE_SENSE_VALID) {
4768 int sense_len, returned_sense_len;
4769
4770 returned_sense_len = min(le32toh(scsi_reply->SenseCount),
4771 sizeof(struct scsi_sense_data));
4772 if (returned_sense_len < csio->sense_len)
4773 csio->sense_resid = csio->sense_len -
4774 returned_sense_len;
4775 else
4776 csio->sense_resid = 0;
4777
4778 sense_len = min(returned_sense_len,
4779 csio->sense_len - csio->sense_resid);
4780 bzero(&csio->sense_data, sizeof(csio->sense_data));
4781 bcopy(cm->sense, &csio->sense_data, sense_len);
4782 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
4783 }
4784
4785 break;
4786 case MPI3_IOCSTATUS_INVALID_SGL:
4787 mpi3mr_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
4788 break;
4789 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
4790 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
4791 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
4792 case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
4793 case MPI3_IOCSTATUS_INVALID_FUNCTION:
4794 case MPI3_IOCSTATUS_INTERNAL_ERROR:
4795 case MPI3_IOCSTATUS_INVALID_FIELD:
4796 case MPI3_IOCSTATUS_INVALID_STATE:
4797 case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
4798 case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
4799 case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
4800 case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
4801 default:
4802 csio->resid = cm->length;
4803 mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
4804 break;
4805 }
4806
4807 out_success:
4808 if (mpi3mr_get_ccbstatus(ccb) != CAM_REQ_CMP) {
4809 ccb->ccb_h.status |= CAM_DEV_QFRZN;
4810 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
4811 }
4812
4813 mpi3mr_atomic_dec(&cm->targ->outstanding);
4814 mpi3mr_cmd_done(sc, cm);
4815 mpi3mr_dprint(sc, MPI3MR_TRACE, "Completion IO path :"
4816 " cdb[0]: %x targetid: 0x%x SMID: %x ioc_status: 0x%x ioc_loginfo: 0x%x scsi_status: 0x%x "
4817 "scsi_state: 0x%x response_data: 0x%x\n", scsi_cdb[0], target_id, host_tag,
4818 ioc_status, ioc_loginfo, scsi_status, scsi_state, resp_data);
4819 mpi3mr_atomic_dec(&sc->fw_outstanding);
4820 out:
4821
4822 if (sense_buf)
4823 mpi3mr_repost_sense_buf(sc,
4824 scsi_reply->SenseDataBufferAddress);
4825 return;
4826 }
4827
4828 /*
4829 * mpi3mr_complete_io_cmd: ISR routine for IO commands
4830 * @sc: Adapter's soft instance
4831 * @irq_ctx: Driver's internal per IRQ structure
4832 *
4833 * This function processes IO command completions.
4834 */
mpi3mr_complete_io_cmd(struct mpi3mr_softc * sc,struct mpi3mr_irq_context * irq_ctx)4835 int mpi3mr_complete_io_cmd(struct mpi3mr_softc *sc,
4836 struct mpi3mr_irq_context *irq_ctx)
4837 {
4838 struct mpi3mr_op_reply_queue *op_reply_q = irq_ctx->op_reply_q;
4839 U32 exp_phase = op_reply_q->ephase;
4840 U32 reply_ci = op_reply_q->ci;
4841 U32 num_op_replies = 0;
4842 U64 reply_dma = 0;
4843 Mpi3DefaultReplyDescriptor_t *reply_desc;
4844 U16 req_qid = 0;
4845
4846 mtx_lock_spin(&op_reply_q->q_lock);
4847 if (op_reply_q->in_use == false) {
4848 op_reply_q->in_use = true;
4849 mtx_unlock_spin(&op_reply_q->q_lock);
4850 } else {
4851 mtx_unlock_spin(&op_reply_q->q_lock);
4852 return 0;
4853 }
4854
4855 reply_desc = (Mpi3DefaultReplyDescriptor_t *)op_reply_q->q_base + reply_ci;
4856 mpi3mr_dprint(sc, MPI3MR_TRACE, "[QID:%d]:reply_desc: (%pa) reply_ci: %x"
4857 " reply_desc->ReplyFlags: 0x%x\n"
4858 "reply_q_base_phys: %#016jx reply_q_base: (%pa) exp_phase: %x\n",
4859 op_reply_q->qid, reply_desc, reply_ci, reply_desc->ReplyFlags, op_reply_q->q_base_phys,
4860 op_reply_q->q_base, exp_phase);
4861
4862 if (((reply_desc->ReplyFlags &
4863 MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) || !op_reply_q->qid) {
4864 mtx_lock_spin(&op_reply_q->q_lock);
4865 op_reply_q->in_use = false;
4866 mtx_unlock_spin(&op_reply_q->q_lock);
4867 return 0;
4868 }
4869
4870 do {
4871 req_qid = reply_desc->RequestQueueID;
4872 sc->op_req_q[req_qid - 1].ci =
4873 reply_desc->RequestQueueCI;
4874
4875 mpi3mr_process_op_reply_desc(sc, reply_desc, &reply_dma);
4876 mpi3mr_atomic_dec(&op_reply_q->pend_ios);
4877 if (reply_dma)
4878 mpi3mr_repost_reply_buf(sc, reply_dma);
4879 num_op_replies++;
4880 if (++reply_ci == op_reply_q->num_replies) {
4881 reply_ci = 0;
4882 exp_phase ^= 1;
4883 }
4884 reply_desc =
4885 (Mpi3DefaultReplyDescriptor_t *)op_reply_q->q_base + reply_ci;
4886 if ((reply_desc->ReplyFlags &
4887 MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
4888 break;
4889 } while (1);
4890
4891
4892 mpi3mr_regwrite(sc, MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(op_reply_q->qid), reply_ci);
4893 op_reply_q->ci = reply_ci;
4894 op_reply_q->ephase = exp_phase;
4895 mtx_lock_spin(&op_reply_q->q_lock);
4896 op_reply_q->in_use = false;
4897 mtx_unlock_spin(&op_reply_q->q_lock);
4898 return num_op_replies;
4899 }
4900
4901 /*
4902 * mpi3mr_isr: Primary ISR function
4903 * privdata: Driver's internal per IRQ structure
4904 *
4905 * This is driver's primary ISR function which is being called whenever any admin/IO
4906 * command completion.
4907 */
mpi3mr_isr(void * privdata)4908 void mpi3mr_isr(void *privdata)
4909 {
4910 struct mpi3mr_irq_context *irq_ctx = (struct mpi3mr_irq_context *)privdata;
4911 struct mpi3mr_softc *sc = irq_ctx->sc;
4912 U16 msi_idx;
4913
4914 if (!irq_ctx)
4915 return;
4916
4917 msi_idx = irq_ctx->msix_index;
4918
4919 if (!sc->intr_enabled)
4920 return;
4921
4922 if (!msi_idx)
4923 mpi3mr_complete_admin_cmd(sc);
4924
4925 if (irq_ctx->op_reply_q && irq_ctx->op_reply_q->qid) {
4926 mpi3mr_complete_io_cmd(sc, irq_ctx);
4927 }
4928 }
4929
4930 /*
4931 * mpi3mr_alloc_requests - Allocates host commands
4932 * @sc: Adapter reference
4933 *
4934 * This function allocates controller supported host commands
4935 *
4936 * Return: 0 on success and proper error codes on failure
4937 */
4938 int
mpi3mr_alloc_requests(struct mpi3mr_softc * sc)4939 mpi3mr_alloc_requests(struct mpi3mr_softc *sc)
4940 {
4941 struct mpi3mr_cmd *cmd;
4942 int i, j, nsegs, ret;
4943
4944 nsegs = MPI3MR_SG_DEPTH;
4945 ret = bus_dma_tag_create( sc->mpi3mr_parent_dmat, /* parent */
4946 1, 0, /* algnmnt, boundary */
4947 sc->dma_loaddr, /* lowaddr */
4948 BUS_SPACE_MAXADDR, /* highaddr */
4949 NULL, NULL, /* filter, filterarg */
4950 BUS_SPACE_MAXSIZE, /* maxsize */
4951 nsegs, /* nsegments */
4952 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
4953 BUS_DMA_ALLOCNOW, /* flags */
4954 busdma_lock_mutex, /* lockfunc */
4955 &sc->io_lock, /* lockarg */
4956 &sc->buffer_dmat);
4957 if (ret) {
4958 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate buffer DMA tag ret: %d\n", ret);
4959 return (ENOMEM);
4960 }
4961
4962 /*
4963 * sc->cmd_list is an array of struct mpi3mr_cmd pointers.
4964 * Allocate the dynamic array first and then allocate individual
4965 * commands.
4966 */
4967 sc->cmd_list = malloc(sizeof(struct mpi3mr_cmd *) * sc->max_host_ios,
4968 M_MPI3MR, M_NOWAIT | M_ZERO);
4969
4970 if (!sc->cmd_list) {
4971 device_printf(sc->mpi3mr_dev, "Cannot alloc memory for mpt_cmd_list.\n");
4972 return (ENOMEM);
4973 }
4974
4975 for (i = 0; i < sc->max_host_ios; i++) {
4976 sc->cmd_list[i] = malloc(sizeof(struct mpi3mr_cmd),
4977 M_MPI3MR, M_NOWAIT | M_ZERO);
4978 if (!sc->cmd_list[i]) {
4979 for (j = 0; j < i; j++)
4980 free(sc->cmd_list[j], M_MPI3MR);
4981 free(sc->cmd_list, M_MPI3MR);
4982 sc->cmd_list = NULL;
4983 return (ENOMEM);
4984 }
4985 }
4986
4987 for (i = 1; i < sc->max_host_ios; i++) {
4988 cmd = sc->cmd_list[i];
4989 cmd->hosttag = i;
4990 cmd->sc = sc;
4991 cmd->state = MPI3MR_CMD_STATE_BUSY;
4992 callout_init_mtx(&cmd->callout, &sc->mpi3mr_mtx, 0);
4993 cmd->ccb = NULL;
4994 TAILQ_INSERT_TAIL(&(sc->cmd_list_head), cmd, next);
4995 if (bus_dmamap_create(sc->buffer_dmat, 0, &cmd->dmamap))
4996 return ENOMEM;
4997 }
4998 return (0);
4999 }
5000
5001 /*
5002 * mpi3mr_get_command: Get a coomand structure from free command pool
5003 * @sc: Adapter soft instance
5004 * Return: MPT command reference
5005 *
5006 * This function returns an MPT command to the caller.
5007 */
5008 struct mpi3mr_cmd *
mpi3mr_get_command(struct mpi3mr_softc * sc)5009 mpi3mr_get_command(struct mpi3mr_softc *sc)
5010 {
5011 struct mpi3mr_cmd *cmd = NULL;
5012
5013 mtx_lock(&sc->cmd_pool_lock);
5014 if (!TAILQ_EMPTY(&sc->cmd_list_head)) {
5015 cmd = TAILQ_FIRST(&sc->cmd_list_head);
5016 TAILQ_REMOVE(&sc->cmd_list_head, cmd, next);
5017 } else {
5018 goto out;
5019 }
5020
5021 mpi3mr_dprint(sc, MPI3MR_TRACE, "Get command SMID: 0x%x\n", cmd->hosttag);
5022
5023 memset((uint8_t *)&cmd->io_request, 0, MPI3MR_AREQ_FRAME_SZ);
5024 cmd->data_dir = 0;
5025 cmd->ccb = NULL;
5026 cmd->targ = NULL;
5027 cmd->state = MPI3MR_CMD_STATE_BUSY;
5028 cmd->data = NULL;
5029 cmd->length = 0;
5030 out:
5031 mtx_unlock(&sc->cmd_pool_lock);
5032 return cmd;
5033 }
5034
5035 /*
5036 * mpi3mr_release_command: Return a cmd to free command pool
5037 * input: Command packet for return to free command pool
5038 *
5039 * This function returns an MPT command to the free command list.
5040 */
5041 void
mpi3mr_release_command(struct mpi3mr_cmd * cmd)5042 mpi3mr_release_command(struct mpi3mr_cmd *cmd)
5043 {
5044 struct mpi3mr_softc *sc = cmd->sc;
5045
5046 mtx_lock(&sc->cmd_pool_lock);
5047 TAILQ_INSERT_HEAD(&(sc->cmd_list_head), cmd, next);
5048 cmd->state = MPI3MR_CMD_STATE_FREE;
5049 cmd->req_qidx = 0;
5050 mpi3mr_dprint(sc, MPI3MR_TRACE, "Release command SMID: 0x%x\n", cmd->hosttag);
5051 mtx_unlock(&sc->cmd_pool_lock);
5052
5053 return;
5054 }
5055
5056 /**
5057 * mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma
5058 * @sc: Adapter instance reference
5059 *
5060 * Free the DMA memory allocated for IOCTL handling purpose.
5061 *
5062 * Return: None
5063 */
mpi3mr_free_ioctl_dma_memory(struct mpi3mr_softc * sc)5064 static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_softc *sc)
5065 {
5066 U16 i;
5067 struct dma_memory_desc *mem_desc;
5068
5069 for (i=0; i<MPI3MR_NUM_IOCTL_SGE; i++) {
5070 mem_desc = &sc->ioctl_sge[i];
5071 if (mem_desc->addr && mem_desc->dma_addr) {
5072 bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5073 bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5074 mem_desc->addr = NULL;
5075 if (mem_desc->tag != NULL)
5076 bus_dma_tag_destroy(mem_desc->tag);
5077 }
5078 }
5079
5080 mem_desc = &sc->ioctl_chain_sge;
5081 if (mem_desc->addr && mem_desc->dma_addr) {
5082 bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5083 bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5084 mem_desc->addr = NULL;
5085 if (mem_desc->tag != NULL)
5086 bus_dma_tag_destroy(mem_desc->tag);
5087 }
5088
5089 mem_desc = &sc->ioctl_resp_sge;
5090 if (mem_desc->addr && mem_desc->dma_addr) {
5091 bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5092 bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5093 mem_desc->addr = NULL;
5094 if (mem_desc->tag != NULL)
5095 bus_dma_tag_destroy(mem_desc->tag);
5096 }
5097
5098 sc->ioctl_sges_allocated = false;
5099 }
5100
5101 /**
5102 * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
5103 * @sc: Adapter instance reference
5104 *
5105 * This function allocates dmaable memory required to handle the
5106 * application issued MPI3 IOCTL requests.
5107 *
5108 * Return: None
5109 */
mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_softc * sc)5110 void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_softc *sc)
5111 {
5112 struct dma_memory_desc *mem_desc;
5113 U16 i;
5114
5115 for (i=0; i<MPI3MR_NUM_IOCTL_SGE; i++) {
5116 mem_desc = &sc->ioctl_sge[i];
5117 mem_desc->size = MPI3MR_IOCTL_SGE_SIZE;
5118
5119 if (bus_dma_tag_create(sc->mpi3mr_parent_dmat, /* parent */
5120 4, 0, /* algnmnt, boundary */
5121 sc->dma_loaddr, /* lowaddr */
5122 BUS_SPACE_MAXADDR, /* highaddr */
5123 NULL, NULL, /* filter, filterarg */
5124 mem_desc->size, /* maxsize */
5125 1, /* nsegments */
5126 mem_desc->size, /* maxsegsize */
5127 0, /* flags */
5128 NULL, NULL, /* lockfunc, lockarg */
5129 &mem_desc->tag)) {
5130 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5131 goto out_failed;
5132 }
5133
5134 if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5135 BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5136 mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
5137 goto out_failed;
5138 }
5139 bzero(mem_desc->addr, mem_desc->size);
5140 bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5141 mpi3mr_memaddr_cb, &mem_desc->dma_addr, BUS_DMA_NOWAIT);
5142
5143 if (!mem_desc->addr)
5144 goto out_failed;
5145 }
5146
5147 mem_desc = &sc->ioctl_chain_sge;
5148 mem_desc->size = MPI3MR_4K_PGSZ;
5149 if (bus_dma_tag_create(sc->mpi3mr_parent_dmat, /* parent */
5150 4, 0, /* algnmnt, boundary */
5151 sc->dma_loaddr, /* lowaddr */
5152 BUS_SPACE_MAXADDR, /* highaddr */
5153 NULL, NULL, /* filter, filterarg */
5154 mem_desc->size, /* maxsize */
5155 1, /* nsegments */
5156 mem_desc->size, /* maxsegsize */
5157 0, /* flags */
5158 NULL, NULL, /* lockfunc, lockarg */
5159 &mem_desc->tag)) {
5160 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5161 goto out_failed;
5162 }
5163
5164 if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5165 BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5166 mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
5167 goto out_failed;
5168 }
5169 bzero(mem_desc->addr, mem_desc->size);
5170 bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5171 mpi3mr_memaddr_cb, &mem_desc->dma_addr, BUS_DMA_NOWAIT);
5172
5173 if (!mem_desc->addr)
5174 goto out_failed;
5175
5176 mem_desc = &sc->ioctl_resp_sge;
5177 mem_desc->size = MPI3MR_4K_PGSZ;
5178 if (bus_dma_tag_create(sc->mpi3mr_parent_dmat, /* parent */
5179 4, 0, /* algnmnt, boundary */
5180 sc->dma_loaddr, /* lowaddr */
5181 BUS_SPACE_MAXADDR, /* highaddr */
5182 NULL, NULL, /* filter, filterarg */
5183 mem_desc->size, /* maxsize */
5184 1, /* nsegments */
5185 mem_desc->size, /* maxsegsize */
5186 0, /* flags */
5187 NULL, NULL, /* lockfunc, lockarg */
5188 &mem_desc->tag)) {
5189 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5190 goto out_failed;
5191 }
5192
5193 if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5194 BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5195 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
5196 goto out_failed;
5197 }
5198 bzero(mem_desc->addr, mem_desc->size);
5199 bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5200 mpi3mr_memaddr_cb, &mem_desc->dma_addr, BUS_DMA_NOWAIT);
5201
5202 if (!mem_desc->addr)
5203 goto out_failed;
5204
5205 sc->ioctl_sges_allocated = true;
5206
5207 return;
5208 out_failed:
5209 printf("cannot allocate DMA memory for the mpt commands"
5210 " from the applications, application interface for MPT command is disabled\n");
5211 mpi3mr_free_ioctl_dma_memory(sc);
5212 }
5213
5214 void
mpi3mr_destory_mtx(struct mpi3mr_softc * sc)5215 mpi3mr_destory_mtx(struct mpi3mr_softc *sc)
5216 {
5217 int i;
5218 struct mpi3mr_op_req_queue *op_req_q;
5219 struct mpi3mr_op_reply_queue *op_reply_q;
5220
5221 if (sc->admin_reply) {
5222 if (mtx_initialized(&sc->admin_reply_lock))
5223 mtx_destroy(&sc->admin_reply_lock);
5224 }
5225
5226 if (sc->op_reply_q) {
5227 for(i = 0; i < sc->num_queues; i++) {
5228 op_reply_q = sc->op_reply_q + i;
5229 if (mtx_initialized(&op_reply_q->q_lock))
5230 mtx_destroy(&op_reply_q->q_lock);
5231 }
5232 }
5233
5234 if (sc->op_req_q) {
5235 for(i = 0; i < sc->num_queues; i++) {
5236 op_req_q = sc->op_req_q + i;
5237 if (mtx_initialized(&op_req_q->q_lock))
5238 mtx_destroy(&op_req_q->q_lock);
5239 }
5240 }
5241
5242 if (mtx_initialized(&sc->init_cmds.completion.lock))
5243 mtx_destroy(&sc->init_cmds.completion.lock);
5244
5245 if (mtx_initialized(&sc->ioctl_cmds.completion.lock))
5246 mtx_destroy(&sc->ioctl_cmds.completion.lock);
5247
5248 if (mtx_initialized(&sc->host_tm_cmds.completion.lock))
5249 mtx_destroy(&sc->host_tm_cmds.completion.lock);
5250
5251 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5252 if (mtx_initialized(&sc->dev_rmhs_cmds[i].completion.lock))
5253 mtx_destroy(&sc->dev_rmhs_cmds[i].completion.lock);
5254 }
5255
5256 if (mtx_initialized(&sc->reset_mutex))
5257 mtx_destroy(&sc->reset_mutex);
5258
5259 if (mtx_initialized(&sc->target_lock))
5260 mtx_destroy(&sc->target_lock);
5261
5262 if (mtx_initialized(&sc->fwevt_lock))
5263 mtx_destroy(&sc->fwevt_lock);
5264
5265 if (mtx_initialized(&sc->cmd_pool_lock))
5266 mtx_destroy(&sc->cmd_pool_lock);
5267
5268 if (mtx_initialized(&sc->reply_free_q_lock))
5269 mtx_destroy(&sc->reply_free_q_lock);
5270
5271 if (mtx_initialized(&sc->sense_buf_q_lock))
5272 mtx_destroy(&sc->sense_buf_q_lock);
5273
5274 if (mtx_initialized(&sc->chain_buf_lock))
5275 mtx_destroy(&sc->chain_buf_lock);
5276
5277 if (mtx_initialized(&sc->admin_req_lock))
5278 mtx_destroy(&sc->admin_req_lock);
5279
5280 if (mtx_initialized(&sc->mpi3mr_mtx))
5281 mtx_destroy(&sc->mpi3mr_mtx);
5282 }
5283
5284 /**
5285 * mpi3mr_free_mem - Freeup adapter level data structures
5286 * @sc: Adapter reference
5287 *
5288 * Return: Nothing.
5289 */
5290 void
mpi3mr_free_mem(struct mpi3mr_softc * sc)5291 mpi3mr_free_mem(struct mpi3mr_softc *sc)
5292 {
5293 int i;
5294 struct mpi3mr_op_req_queue *op_req_q;
5295 struct mpi3mr_op_reply_queue *op_reply_q;
5296 struct mpi3mr_irq_context *irq_ctx;
5297
5298 if (sc->cmd_list) {
5299 for (i = 0; i < sc->max_host_ios; i++) {
5300 free(sc->cmd_list[i], M_MPI3MR);
5301 }
5302 free(sc->cmd_list, M_MPI3MR);
5303 sc->cmd_list = NULL;
5304 }
5305
5306 if (sc->pel_seq_number && sc->pel_seq_number_dma) {
5307 bus_dmamap_unload(sc->pel_seq_num_dmatag, sc->pel_seq_num_dmamap);
5308 bus_dmamem_free(sc->pel_seq_num_dmatag, sc->pel_seq_number, sc->pel_seq_num_dmamap);
5309 sc->pel_seq_number = NULL;
5310 if (sc->pel_seq_num_dmatag != NULL)
5311 bus_dma_tag_destroy(sc->pel_seq_num_dmatag);
5312 }
5313
5314 if (sc->throttle_groups) {
5315 free(sc->throttle_groups, M_MPI3MR);
5316 sc->throttle_groups = NULL;
5317 }
5318
5319 /* Free up operational queues*/
5320 if (sc->op_req_q) {
5321 for (i = 0; i < sc->num_queues; i++) {
5322 op_req_q = sc->op_req_q + i;
5323 if (op_req_q->q_base && op_req_q->q_base_phys) {
5324 bus_dmamap_unload(op_req_q->q_base_tag, op_req_q->q_base_dmamap);
5325 bus_dmamem_free(op_req_q->q_base_tag, op_req_q->q_base, op_req_q->q_base_dmamap);
5326 op_req_q->q_base = NULL;
5327 if (op_req_q->q_base_tag != NULL)
5328 bus_dma_tag_destroy(op_req_q->q_base_tag);
5329 }
5330 }
5331 free(sc->op_req_q, M_MPI3MR);
5332 sc->op_req_q = NULL;
5333 }
5334
5335 if (sc->op_reply_q) {
5336 for (i = 0; i < sc->num_queues; i++) {
5337 op_reply_q = sc->op_reply_q + i;
5338 if (op_reply_q->q_base && op_reply_q->q_base_phys) {
5339 bus_dmamap_unload(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap);
5340 bus_dmamem_free(op_reply_q->q_base_tag, op_reply_q->q_base, op_reply_q->q_base_dmamap);
5341 op_reply_q->q_base = NULL;
5342 if (op_reply_q->q_base_tag != NULL)
5343 bus_dma_tag_destroy(op_reply_q->q_base_tag);
5344 }
5345 }
5346 free(sc->op_reply_q, M_MPI3MR);
5347 sc->op_reply_q = NULL;
5348 }
5349
5350 /* Free up chain buffers*/
5351 if (sc->chain_sgl_list) {
5352 for (i = 0; i < sc->chain_buf_count; i++) {
5353 if (sc->chain_sgl_list[i].buf && sc->chain_sgl_list[i].buf_phys) {
5354 bus_dmamap_unload(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap);
5355 bus_dmamem_free(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf,
5356 sc->chain_sgl_list[i].buf_dmamap);
5357 sc->chain_sgl_list[i].buf = NULL;
5358 }
5359 }
5360 if (sc->chain_sgl_list_tag != NULL)
5361 bus_dma_tag_destroy(sc->chain_sgl_list_tag);
5362 free(sc->chain_sgl_list, M_MPI3MR);
5363 sc->chain_sgl_list = NULL;
5364 }
5365
5366 if (sc->chain_bitmap) {
5367 free(sc->chain_bitmap, M_MPI3MR);
5368 sc->chain_bitmap = NULL;
5369 }
5370
5371 for (i = 0; i < sc->msix_count; i++) {
5372 irq_ctx = sc->irq_ctx + i;
5373 if (irq_ctx)
5374 irq_ctx->op_reply_q = NULL;
5375 }
5376
5377 /* Free reply_buf_tag */
5378 if (sc->reply_buf && sc->reply_buf_phys) {
5379 bus_dmamap_unload(sc->reply_buf_tag, sc->reply_buf_dmamap);
5380 bus_dmamem_free(sc->reply_buf_tag, sc->reply_buf,
5381 sc->reply_buf_dmamap);
5382 sc->reply_buf = NULL;
5383 if (sc->reply_buf_tag != NULL)
5384 bus_dma_tag_destroy(sc->reply_buf_tag);
5385 }
5386
5387 /* Free reply_free_q_tag */
5388 if (sc->reply_free_q && sc->reply_free_q_phys) {
5389 bus_dmamap_unload(sc->reply_free_q_tag, sc->reply_free_q_dmamap);
5390 bus_dmamem_free(sc->reply_free_q_tag, sc->reply_free_q,
5391 sc->reply_free_q_dmamap);
5392 sc->reply_free_q = NULL;
5393 if (sc->reply_free_q_tag != NULL)
5394 bus_dma_tag_destroy(sc->reply_free_q_tag);
5395 }
5396
5397 /* Free sense_buf_tag */
5398 if (sc->sense_buf && sc->sense_buf_phys) {
5399 bus_dmamap_unload(sc->sense_buf_tag, sc->sense_buf_dmamap);
5400 bus_dmamem_free(sc->sense_buf_tag, sc->sense_buf,
5401 sc->sense_buf_dmamap);
5402 sc->sense_buf = NULL;
5403 if (sc->sense_buf_tag != NULL)
5404 bus_dma_tag_destroy(sc->sense_buf_tag);
5405 }
5406
5407 /* Free sense_buf_q_tag */
5408 if (sc->sense_buf_q && sc->sense_buf_q_phys) {
5409 bus_dmamap_unload(sc->sense_buf_q_tag, sc->sense_buf_q_dmamap);
5410 bus_dmamem_free(sc->sense_buf_q_tag, sc->sense_buf_q,
5411 sc->sense_buf_q_dmamap);
5412 sc->sense_buf_q = NULL;
5413 if (sc->sense_buf_q_tag != NULL)
5414 bus_dma_tag_destroy(sc->sense_buf_q_tag);
5415 }
5416
5417 /* Free up internal(non-IO) commands*/
5418 if (sc->init_cmds.reply) {
5419 free(sc->init_cmds.reply, M_MPI3MR);
5420 sc->init_cmds.reply = NULL;
5421 }
5422
5423 if (sc->ioctl_cmds.reply) {
5424 free(sc->ioctl_cmds.reply, M_MPI3MR);
5425 sc->ioctl_cmds.reply = NULL;
5426 }
5427
5428 if (sc->pel_cmds.reply) {
5429 free(sc->pel_cmds.reply, M_MPI3MR);
5430 sc->pel_cmds.reply = NULL;
5431 }
5432
5433 if (sc->pel_abort_cmd.reply) {
5434 free(sc->pel_abort_cmd.reply, M_MPI3MR);
5435 sc->pel_abort_cmd.reply = NULL;
5436 }
5437
5438 if (sc->host_tm_cmds.reply) {
5439 free(sc->host_tm_cmds.reply, M_MPI3MR);
5440 sc->host_tm_cmds.reply = NULL;
5441 }
5442
5443 if (sc->log_data_buffer) {
5444 free(sc->log_data_buffer, M_MPI3MR);
5445 sc->log_data_buffer = NULL;
5446 }
5447
5448 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5449 if (sc->dev_rmhs_cmds[i].reply) {
5450 free(sc->dev_rmhs_cmds[i].reply, M_MPI3MR);
5451 sc->dev_rmhs_cmds[i].reply = NULL;
5452 }
5453 }
5454
5455 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5456 if (sc->evtack_cmds[i].reply) {
5457 free(sc->evtack_cmds[i].reply, M_MPI3MR);
5458 sc->evtack_cmds[i].reply = NULL;
5459 }
5460 }
5461
5462 if (sc->removepend_bitmap) {
5463 free(sc->removepend_bitmap, M_MPI3MR);
5464 sc->removepend_bitmap = NULL;
5465 }
5466
5467 if (sc->devrem_bitmap) {
5468 free(sc->devrem_bitmap, M_MPI3MR);
5469 sc->devrem_bitmap = NULL;
5470 }
5471
5472 if (sc->evtack_cmds_bitmap) {
5473 free(sc->evtack_cmds_bitmap, M_MPI3MR);
5474 sc->evtack_cmds_bitmap = NULL;
5475 }
5476
5477 /* Free Admin reply*/
5478 if (sc->admin_reply && sc->admin_reply_phys) {
5479 bus_dmamap_unload(sc->admin_reply_tag, sc->admin_reply_dmamap);
5480 bus_dmamem_free(sc->admin_reply_tag, sc->admin_reply,
5481 sc->admin_reply_dmamap);
5482 sc->admin_reply = NULL;
5483 if (sc->admin_reply_tag != NULL)
5484 bus_dma_tag_destroy(sc->admin_reply_tag);
5485 }
5486
5487 /* Free Admin request*/
5488 if (sc->admin_req && sc->admin_req_phys) {
5489 bus_dmamap_unload(sc->admin_req_tag, sc->admin_req_dmamap);
5490 bus_dmamem_free(sc->admin_req_tag, sc->admin_req,
5491 sc->admin_req_dmamap);
5492 sc->admin_req = NULL;
5493 if (sc->admin_req_tag != NULL)
5494 bus_dma_tag_destroy(sc->admin_req_tag);
5495 }
5496 mpi3mr_free_ioctl_dma_memory(sc);
5497
5498 }
5499
5500 /**
5501 * mpi3mr_drv_cmd_comp_reset - Flush a internal driver command
5502 * @sc: Adapter instance reference
5503 * @cmdptr: Internal command tracker
5504 *
5505 * Complete an internal driver commands with state indicating it
5506 * is completed due to reset.
5507 *
5508 * Return: Nothing.
5509 */
mpi3mr_drv_cmd_comp_reset(struct mpi3mr_softc * sc,struct mpi3mr_drvr_cmd * cmdptr)5510 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_softc *sc,
5511 struct mpi3mr_drvr_cmd *cmdptr)
5512 {
5513 if (cmdptr->state & MPI3MR_CMD_PENDING) {
5514 cmdptr->state |= MPI3MR_CMD_RESET;
5515 cmdptr->state &= ~MPI3MR_CMD_PENDING;
5516 if (cmdptr->is_waiting) {
5517 complete(&cmdptr->completion);
5518 cmdptr->is_waiting = 0;
5519 } else if (cmdptr->callback)
5520 cmdptr->callback(sc, cmdptr);
5521 }
5522 }
5523
5524 /**
5525 * mpi3mr_flush_drv_cmds - Flush internal driver commands
5526 * @sc: Adapter instance reference
5527 *
5528 * Flush all internal driver commands post reset
5529 *
5530 * Return: Nothing.
5531 */
mpi3mr_flush_drv_cmds(struct mpi3mr_softc * sc)5532 static void mpi3mr_flush_drv_cmds(struct mpi3mr_softc *sc)
5533 {
5534 int i = 0;
5535 struct mpi3mr_drvr_cmd *cmdptr;
5536
5537 cmdptr = &sc->init_cmds;
5538 mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5539
5540 cmdptr = &sc->ioctl_cmds;
5541 mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5542
5543 cmdptr = &sc->host_tm_cmds;
5544 mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5545
5546 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5547 cmdptr = &sc->dev_rmhs_cmds[i];
5548 mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5549 }
5550
5551 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5552 cmdptr = &sc->evtack_cmds[i];
5553 mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5554 }
5555
5556 cmdptr = &sc->pel_cmds;
5557 mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5558
5559 cmdptr = &sc->pel_abort_cmd;
5560 mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5561 }
5562
5563
5564 /**
5565 * mpi3mr_memset_buffers - memset memory for a controller
5566 * @sc: Adapter instance reference
5567 *
5568 * clear all the memory allocated for a controller, typically
5569 * called post reset to reuse the memory allocated during the
5570 * controller init.
5571 *
5572 * Return: Nothing.
5573 */
mpi3mr_memset_buffers(struct mpi3mr_softc * sc)5574 static void mpi3mr_memset_buffers(struct mpi3mr_softc *sc)
5575 {
5576 U16 i;
5577 struct mpi3mr_throttle_group_info *tg;
5578
5579 memset(sc->admin_req, 0, sc->admin_req_q_sz);
5580 memset(sc->admin_reply, 0, sc->admin_reply_q_sz);
5581
5582 memset(sc->init_cmds.reply, 0, sc->reply_sz);
5583 memset(sc->ioctl_cmds.reply, 0, sc->reply_sz);
5584 memset(sc->host_tm_cmds.reply, 0, sc->reply_sz);
5585 memset(sc->pel_cmds.reply, 0, sc->reply_sz);
5586 memset(sc->pel_abort_cmd.reply, 0, sc->reply_sz);
5587 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
5588 memset(sc->dev_rmhs_cmds[i].reply, 0, sc->reply_sz);
5589 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
5590 memset(sc->evtack_cmds[i].reply, 0, sc->reply_sz);
5591 memset(sc->removepend_bitmap, 0, sc->dev_handle_bitmap_sz);
5592 memset(sc->devrem_bitmap, 0, sc->devrem_bitmap_sz);
5593 memset(sc->evtack_cmds_bitmap, 0, sc->evtack_cmds_bitmap_sz);
5594
5595 for (i = 0; i < sc->num_queues; i++) {
5596 sc->op_reply_q[i].qid = 0;
5597 sc->op_reply_q[i].ci = 0;
5598 sc->op_reply_q[i].num_replies = 0;
5599 sc->op_reply_q[i].ephase = 0;
5600 mpi3mr_atomic_set(&sc->op_reply_q[i].pend_ios, 0);
5601 memset(sc->op_reply_q[i].q_base, 0, sc->op_reply_q[i].qsz);
5602
5603 sc->op_req_q[i].ci = 0;
5604 sc->op_req_q[i].pi = 0;
5605 sc->op_req_q[i].num_reqs = 0;
5606 sc->op_req_q[i].qid = 0;
5607 sc->op_req_q[i].reply_qid = 0;
5608 memset(sc->op_req_q[i].q_base, 0, sc->op_req_q[i].qsz);
5609 }
5610
5611 mpi3mr_atomic_set(&sc->pend_large_data_sz, 0);
5612 if (sc->throttle_groups) {
5613 tg = sc->throttle_groups;
5614 for (i = 0; i < sc->num_io_throttle_group; i++, tg++) {
5615 tg->id = 0;
5616 tg->fw_qd = 0;
5617 tg->modified_qd = 0;
5618 tg->io_divert= 0;
5619 tg->high = 0;
5620 tg->low = 0;
5621 mpi3mr_atomic_set(&tg->pend_large_data_sz, 0);
5622 }
5623 }
5624 }
5625
5626 /**
5627 * mpi3mr_invalidate_devhandles -Invalidate device handles
5628 * @sc: Adapter instance reference
5629 *
5630 * Invalidate the device handles in the target device structures
5631 * . Called post reset prior to reinitializing the controller.
5632 *
5633 * Return: Nothing.
5634 */
mpi3mr_invalidate_devhandles(struct mpi3mr_softc * sc)5635 static void mpi3mr_invalidate_devhandles(struct mpi3mr_softc *sc)
5636 {
5637 struct mpi3mr_target *target = NULL;
5638
5639 mtx_lock_spin(&sc->target_lock);
5640 TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
5641 if (target) {
5642 target->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
5643 target->io_throttle_enabled = 0;
5644 target->io_divert = 0;
5645 target->throttle_group = NULL;
5646 }
5647 }
5648 mtx_unlock_spin(&sc->target_lock);
5649 }
5650
5651 /**
5652 * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
5653 * @sc: Adapter instance reference
5654 *
5655 * This is executed post controller reset to identify any
5656 * missing devices during reset and remove from the upper layers
5657 * or expose any newly detected device to the upper layers.
5658 *
5659 * Return: Nothing.
5660 */
5661
mpi3mr_rfresh_tgtdevs(struct mpi3mr_softc * sc)5662 static void mpi3mr_rfresh_tgtdevs(struct mpi3mr_softc *sc)
5663 {
5664 struct mpi3mr_target *target = NULL;
5665 struct mpi3mr_target *target_temp = NULL;
5666
5667 TAILQ_FOREACH_SAFE(target, &sc->cam_sc->tgt_list, tgt_next, target_temp) {
5668 if (target->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
5669 if (target->exposed_to_os)
5670 mpi3mr_remove_device_from_os(sc, target->dev_handle);
5671 mpi3mr_remove_device_from_list(sc, target, true);
5672 }
5673 }
5674
5675 TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
5676 if ((target->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
5677 !target->is_hidden && !target->exposed_to_os) {
5678 mpi3mr_add_device(sc, target->per_id);
5679 }
5680 }
5681
5682 }
5683
mpi3mr_flush_io(struct mpi3mr_softc * sc)5684 static void mpi3mr_flush_io(struct mpi3mr_softc *sc)
5685 {
5686 int i;
5687 struct mpi3mr_cmd *cmd = NULL;
5688 union ccb *ccb = NULL;
5689
5690 for (i = 0; i < sc->max_host_ios; i++) {
5691 cmd = sc->cmd_list[i];
5692
5693 if (cmd && cmd->ccb) {
5694 if (cmd->callout_owner) {
5695 ccb = (union ccb *)(cmd->ccb);
5696 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
5697 mpi3mr_cmd_done(sc, cmd);
5698 } else {
5699 cmd->ccb = NULL;
5700 mpi3mr_release_command(cmd);
5701 }
5702 }
5703 }
5704 }
5705 /**
5706 * mpi3mr_clear_reset_history - Clear reset history
5707 * @sc: Adapter instance reference
5708 *
5709 * Write the reset history bit in IOC Status to clear the bit,
5710 * if it is already set.
5711 *
5712 * Return: Nothing.
5713 */
mpi3mr_clear_reset_history(struct mpi3mr_softc * sc)5714 static inline void mpi3mr_clear_reset_history(struct mpi3mr_softc *sc)
5715 {
5716 U32 ioc_status;
5717
5718 ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5719 if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
5720 mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_STATUS_OFFSET, ioc_status);
5721 }
5722
5723 /**
5724 * mpi3mr_set_diagsave - Set diag save bit for snapdump
5725 * @sc: Adapter reference
5726 *
5727 * Set diag save bit in IOC configuration register to enable
5728 * snapdump.
5729 *
5730 * Return: Nothing.
5731 */
mpi3mr_set_diagsave(struct mpi3mr_softc * sc)5732 static inline void mpi3mr_set_diagsave(struct mpi3mr_softc *sc)
5733 {
5734 U32 ioc_config;
5735
5736 ioc_config =
5737 mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
5738 ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
5739 mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
5740 }
5741
5742 /**
5743 * mpi3mr_issue_reset - Issue reset to the controller
5744 * @sc: Adapter reference
5745 * @reset_type: Reset type
5746 * @reset_reason: Reset reason code
5747 *
5748 * Unlock the host diagnostic registers and write the specific
5749 * reset type to that, wait for reset acknowledgement from the
5750 * controller, if the reset is not successful retry for the
5751 * predefined number of times.
5752 *
5753 * Return: 0 on success, non-zero on failure.
5754 */
mpi3mr_issue_reset(struct mpi3mr_softc * sc,U16 reset_type,U32 reset_reason)5755 static int mpi3mr_issue_reset(struct mpi3mr_softc *sc, U16 reset_type,
5756 U32 reset_reason)
5757 {
5758 int retval = -1;
5759 U8 unlock_retry_count = 0;
5760 U32 host_diagnostic, ioc_status, ioc_config;
5761 U32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
5762
5763 if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
5764 (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
5765 return retval;
5766 if (sc->unrecoverable)
5767 return retval;
5768
5769 if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
5770 retval = 0;
5771 return retval;
5772 }
5773
5774 mpi3mr_dprint(sc, MPI3MR_INFO, "%s reset due to %s(0x%x)\n",
5775 mpi3mr_reset_type_name(reset_type),
5776 mpi3mr_reset_rc_name(reset_reason), reset_reason);
5777
5778 mpi3mr_clear_reset_history(sc);
5779 do {
5780 mpi3mr_dprint(sc, MPI3MR_INFO,
5781 "Write magic sequence to unlock host diag register (retry=%d)\n",
5782 ++unlock_retry_count);
5783 if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
5784 mpi3mr_dprint(sc, MPI3MR_ERROR,
5785 "%s reset failed! due to host diag register unlock failure"
5786 "host_diagnostic(0x%08x)\n", mpi3mr_reset_type_name(reset_type),
5787 host_diagnostic);
5788 sc->unrecoverable = 1;
5789 return retval;
5790 }
5791
5792 mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5793 MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH);
5794 mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5795 MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST);
5796 mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5797 MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND);
5798 mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5799 MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD);
5800 mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5801 MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH);
5802 mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5803 MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH);
5804 mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5805 MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH);
5806
5807 DELAY(1000); /* delay in usec */
5808 host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
5809 mpi3mr_dprint(sc, MPI3MR_INFO,
5810 "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
5811 unlock_retry_count, host_diagnostic);
5812 } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
5813
5814 mpi3mr_regwrite(sc, MPI3_SYSIF_SCRATCHPAD0_OFFSET, reset_reason);
5815 mpi3mr_regwrite(sc, MPI3_SYSIF_HOST_DIAG_OFFSET, host_diagnostic | reset_type);
5816
5817 if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) {
5818 do {
5819 ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5820 if (ioc_status &
5821 MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
5822 ioc_config =
5823 mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
5824 if (mpi3mr_soft_reset_success(ioc_status,
5825 ioc_config)) {
5826 mpi3mr_clear_reset_history(sc);
5827 retval = 0;
5828 break;
5829 }
5830 }
5831 DELAY(100 * 1000);
5832 } while (--timeout);
5833 } else if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT) {
5834 do {
5835 ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5836 if (mpi3mr_diagfault_success(sc, ioc_status)) {
5837 retval = 0;
5838 break;
5839 }
5840 DELAY(100 * 1000);
5841 } while (--timeout);
5842 }
5843
5844 mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5845 MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND);
5846
5847 ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5848 ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
5849
5850 mpi3mr_dprint(sc, MPI3MR_INFO,
5851 "IOC Status/Config after %s reset is (0x%x)/(0x%x)\n",
5852 !retval ? "successful":"failed", ioc_status,
5853 ioc_config);
5854
5855 if (retval)
5856 sc->unrecoverable = 1;
5857
5858 return retval;
5859 }
5860
mpi3mr_cleanup_event_taskq(struct mpi3mr_softc * sc)5861 inline void mpi3mr_cleanup_event_taskq(struct mpi3mr_softc *sc)
5862 {
5863 /*
5864 * Block the taskqueue before draining. This means any new tasks won't
5865 * be queued to the taskqueue worker thread. But it doesn't stop the
5866 * current workers that are running. taskqueue_drain waits for those
5867 * correctly in the case of thread backed taskqueues. The while loop
5868 * ensures that all taskqueue threads have finished their current tasks.
5869 */
5870 taskqueue_block(sc->cam_sc->ev_tq);
5871 while (taskqueue_cancel(sc->cam_sc->ev_tq, &sc->cam_sc->ev_task, NULL) != 0) {
5872 taskqueue_drain(sc->cam_sc->ev_tq, &sc->cam_sc->ev_task);
5873 }
5874 }
5875
/**
 * mpi3mr_soft_reset_handler - Reset the controller
 * @sc: Adapter instance reference
 * @reset_reason: Reset reason code
 * @snapdump: snapdump enable/disable bit
 *
 * This is a handler for recovering the controller by issuing a
 * soft reset or a diag fault reset. This is a blocking function:
 * while one reset is executing, any other reset requests are
 * blocked. All IOCTLs/IO will be blocked during the reset. If
 * the controller reset is successful the controller is
 * reinitialized; otherwise the controller is marked as not
 * recoverable.
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_soft_reset_handler(struct mpi3mr_softc *sc,
	U32 reset_reason, bool snapdump)
{
	int retval = 0, i = 0;
	enum mpi3mr_iocstate ioc_state;

	mpi3mr_dprint(sc, MPI3MR_INFO, "soft reset invoked: reason code: %s\n",
	    mpi3mr_reset_rc_name(reset_reason));

	/* IOCTL-driven resets only take a snapdump when explicitly requested. */
	if ((reset_reason == MPI3MR_RESET_FROM_IOCTL) &&
	     (sc->reset.ioctl_reset_snapdump != true))
		snapdump = false;

	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "soft_reset_handler: wait if diag save is in progress\n");
	/* Busy-wait (1s granularity) for any in-flight diag save to finish. */
	while (sc->diagsave_timeout)
		DELAY(1000 * 1000);

	ioc_state = mpi3mr_get_iocstate(sc);
	if (ioc_state == MRIOC_STATE_UNRECOVERABLE) {
		/* Nothing can be done; publish the failed reset status and exit. */
		mpi3mr_dprint(sc, MPI3MR_ERROR, "controller is in unrecoverable state, exit\n");
		sc->reset.type = MPI3MR_NO_RESET;
		sc->reset.reason = MPI3MR_DEFAULT_RESET_REASON;
		sc->reset.status = -1;
		sc->reset.ioctl_reset_snapdump = false;
		return -1;
	}

	/* Only one reset may run at a time. */
	if (sc->reset_in_progress) {
		mpi3mr_dprint(sc, MPI3MR_INFO, "reset is already in progress, exit\n");
		return -1;
	}

	/* Pause IOs, drain and block the event taskqueue */
	xpt_freeze_simq(sc->cam_sc->sim, 1);

	mpi3mr_cleanup_event_taskq(sc);

	sc->reset_in_progress = 1;
	sc->block_ioctls = 1;

	/*
	 * Wait (up to PEND_IOCTLS_COMP_WAIT_TIME seconds) for pending
	 * IOCTLs to complete; bail out early if the controller faults.
	 */
	while (mpi3mr_atomic_read(&sc->pend_ioctls) && (i < PEND_IOCTLS_COMP_WAIT_TIME)) {
		ioc_state = mpi3mr_get_iocstate(sc);
		if (ioc_state == MRIOC_STATE_FAULT)
			break;
		i++;
		if (!(i % 5)) {
			mpi3mr_dprint(sc, MPI3MR_INFO,
			    "[%2ds]waiting for IOCTL to be finished from %s\n", i, __func__);
		}
		DELAY(1000 * 1000);
	}

	/*
	 * For host-initiated resets (no snapdump, not fault-driven), mask
	 * all event notifications before resetting the controller.
	 */
	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {

		mpi3mr_dprint(sc, MPI3MR_INFO, "Turn off events prior to reset\n");

		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
			sc->event_masks[i] = -1;
		mpi3mr_issue_event_notification(sc);
	}

	mpi3mr_disable_interrupts(sc);

	/* Capture firmware state before it is wiped by the reset. */
	if (snapdump)
		mpi3mr_trigger_snapdump(sc, reset_reason);

	retval = mpi3mr_issue_reset(sc,
	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to issue soft reset to the ioc\n");
		goto out;
	}

	/*
	 * Reset succeeded: flush driver/internal commands and host I/O,
	 * invalidate all device handles, and clear reply/request buffers
	 * before reinitializing the controller.
	 */
	mpi3mr_flush_drv_cmds(sc);
	mpi3mr_flush_io(sc);
	mpi3mr_invalidate_devhandles(sc);
	mpi3mr_memset_buffers(sc);

	if (sc->prepare_for_reset) {
		sc->prepare_for_reset = 0;
		sc->prepare_for_reset_timeout_counter = 0;
	}

	retval = mpi3mr_initialize_ioc(sc, MPI3MR_INIT_TYPE_RESET);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "reinit after soft reset failed: reason %d\n",
		    reset_reason);
		goto out;
	}

	/* Settle time after reinit before resuming normal operation. */
	DELAY((1000 * 1000) * 10);
out:
	if (!retval) {
		sc->diagsave_timeout = 0;
		sc->reset_in_progress = 0;
		/* Re-sync target exposure with the post-reset device list. */
		mpi3mr_rfresh_tgtdevs(sc);
		sc->ts_update_counter = 0;
		sc->block_ioctls = 0;
		sc->pel_abort_requested = 0;
		if (sc->pel_wait_pend) {
			/* Re-arm the persistent event log wait that was in flight. */
			sc->pel_cmds.retry_count = 0;
			mpi3mr_issue_pel_wait(sc, &sc->pel_cmds);
			mpi3mr_app_send_aen(sc);
		}
	} else {
		/* Reset failed: put the controller in diag fault and give up. */
		mpi3mr_issue_reset(sc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		sc->unrecoverable = 1;
		sc->reset_in_progress = 0;
	}

	mpi3mr_dprint(sc, MPI3MR_INFO, "Soft Reset: %s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));

	/* Resume event processing and release the frozen SIM queue. */
	taskqueue_unblock(sc->cam_sc->ev_tq);
	xpt_release_simq(sc->cam_sc->sim, 1);

	sc->reset.type = MPI3MR_NO_RESET;
	sc->reset.reason = MPI3MR_DEFAULT_RESET_REASON;
	sc->reset.status = retval;
	sc->reset.ioctl_reset_snapdump = false;

	return retval;
}
6018
/**
 * mpi3mr_issue_ioc_shutdown - shutdown controller
 * @sc: Adapter instance reference
 *
 * Send a shutdown notification to the controller and wait up to
 * the shutdown timeout for it to be completed.
 *
 * Return: Nothing.
 */
static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_softc *sc)
{
	U32 ioc_config, ioc_status;
	U8 retval = 1, retry = 0;
	/* Poll interval is 100ms, so timeout counts are (seconds * 10). */
	U32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;

	mpi3mr_dprint(sc, MPI3MR_INFO, "sending shutdown notification\n");
	if (sc->unrecoverable) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "controller is unrecoverable, shutdown not issued\n");
		return;
	}
	/* Don't stack a second request on an already-running shutdown. */
	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "shutdown already in progress\n");
		return;
	}

	/* Request a normal device shutdown via the IOC config register. */
	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;

	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);

	/* Prefer the firmware-advertised shutdown timeout when available. */
	if (sc->facts.shutdown_timeout)
		timeout = sc->facts.shutdown_timeout * 10;

	do {
		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
			retval = 0;
			break;
		}

		if (sc->unrecoverable)
			break;

		/*
		 * If the controller faults mid-shutdown, soft reset it and
		 * re-issue the shutdown request, up to
		 * MPI3MR_MAX_SHUTDOWN_RETRY_COUNT attempts.
		 */
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
			mpi3mr_print_fault_info(sc);

			if (retry >= MPI3MR_MAX_SHUTDOWN_RETRY_COUNT)
				break;

			if (mpi3mr_issue_reset(sc,
			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
			    MPI3MR_RESET_FROM_CTLR_CLEANUP))
				break;

			ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
			ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
			ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;

			mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);

			/* Restart the wait window for the re-issued shutdown. */
			if (sc->facts.shutdown_timeout)
				timeout = sc->facts.shutdown_timeout * 10;

			retry++;
		}

		DELAY(100 * 1000);

	} while (--timeout);

	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);

	if (retval) {
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			    "shutdown still in progress after timeout\n");
	}

	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "ioc_status/ioc_config after %s shutdown is (0x%x)/(0x%x)\n",
	    (!retval)?"successful":"failed", ioc_status,
	    ioc_config);
}
6109
6110 /**
6111 * mpi3mr_cleanup_ioc - Cleanup controller
6112 * @sc: Adapter instance reference
6113
6114 * controller cleanup handler, Message unit reset or soft reset
6115 * and shutdown notification is issued to the controller.
6116 *
6117 * Return: Nothing.
6118 */
mpi3mr_cleanup_ioc(struct mpi3mr_softc * sc)6119 void mpi3mr_cleanup_ioc(struct mpi3mr_softc *sc)
6120 {
6121 enum mpi3mr_iocstate ioc_state;
6122
6123 mpi3mr_dprint(sc, MPI3MR_INFO, "cleaning up the controller\n");
6124 mpi3mr_disable_interrupts(sc);
6125
6126 ioc_state = mpi3mr_get_iocstate(sc);
6127
6128 if ((!sc->unrecoverable) && (!sc->reset_in_progress) &&
6129 (ioc_state == MRIOC_STATE_READY)) {
6130 if (mpi3mr_mur_ioc(sc,
6131 MPI3MR_RESET_FROM_CTLR_CLEANUP))
6132 mpi3mr_issue_reset(sc,
6133 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
6134 MPI3MR_RESET_FROM_MUR_FAILURE);
6135 mpi3mr_issue_ioc_shutdown(sc);
6136 }
6137
6138 mpi3mr_dprint(sc, MPI3MR_INFO, "controller cleanup completed\n");
6139 }
6140