1 /*-
2 * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 *
6 * Copyright (c) 2005, WHEEL Sp. z o.o.
7 * Copyright (c) 2005 Justin T. Gibbs.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are
12 * met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16 * substantially similar to the "NO WARRANTY" disclaimer below
17 * ("Disclaimer") and any redistribution must be conditioned upon including
18 * a substantially similar Disclaimer requirement for further binary
19 * redistribution.
20 * 3. Neither the names of the above listed copyright holders nor the names
21 * of any contributors may be used to endorse or promote products derived
22 * from this software without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
34 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 */
36 /*-
37 * Some Breakage and Bug Fixing added later.
38 * Copyright (c) 2006, by Matthew Jacob
39 * All Rights Reserved
40 *
41 * Support from LSI-Logic has also gone a great deal toward making this a
42 * workable subsystem and is gratefully acknowledged.
43 */
44
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
47
48 #include <dev/mpt/mpt.h>
49 #include <dev/mpt/mpt_raid.h>
50
51 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
52 #include "dev/mpt/mpilib/mpi_raid.h"
53
54 #include <cam/cam.h>
55 #include <cam/cam_ccb.h>
56 #include <cam/cam_periph.h>
57 #include <cam/cam_sim.h>
58 #include <cam/cam_xpt_sim.h>
59
60 #include <sys/callout.h>
61 #include <sys/kthread.h>
62 #include <sys/sysctl.h>
63
64 #include <machine/stdarg.h>
65
/*
 * Result area appended after the RAID action request message in the
 * request buffer.  The reply handler copies the IOC's ActionData and
 * ActionStatus here so a requester sleeping in mpt_wait_req() can
 * examine them after wakeup.
 */
struct mpt_raid_action_result
{
	union {
		MPI_RAID_VOL_INDICATOR	indicator_struct;
		uint32_t		new_settings;
		uint8_t			phys_disk_num;
	} action_data;
	uint16_t		action_status;
};

/* Locate the action result that trails a request's message frame. */
#define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))

/* Completion status of a request with vendor-specific bits masked off. */
#define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
80
/*
 * Personality hooks implemented by this module and registered with the
 * core mpt(4) driver through the table below.
 */
static mpt_probe_handler_t mpt_raid_probe;
static mpt_attach_handler_t mpt_raid_attach;
static mpt_enable_handler_t mpt_raid_enable;
static mpt_event_handler_t mpt_raid_event;
static mpt_shutdown_handler_t mpt_raid_shutdown;
static mpt_reset_handler_t mpt_raid_ioc_reset;
static mpt_detach_handler_t mpt_raid_detach;

static struct mpt_personality mpt_raid_personality =
{
	.name		= "mpt_raid",
	.probe		= mpt_raid_probe,
	.attach		= mpt_raid_attach,
	.enable		= mpt_raid_enable,
	.event		= mpt_raid_event,
	.reset		= mpt_raid_ioc_reset,
	.shutdown	= mpt_raid_shutdown,
	.detach		= mpt_raid_detach,
};

/* RAID personality loads after core and CAM personalities. */
DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);

static mpt_reply_handler_t mpt_raid_reply_handler;
static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
					MSG_DEFAULT_REPLY *reply_frame);
static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
static void mpt_raid_thread(void *arg);
static callout_func_t mpt_raid_timer;
#if 0
static void mpt_enable_vol(struct mpt_softc *mpt,
			   struct mpt_raid_volume *mpt_vol, int enable);
#endif
static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
    struct cam_path *);
static void mpt_raid_sysctl_attach(struct mpt_softc *);

/* Human-readable names for volume/disk state reporting. */
static const char *mpt_vol_type(struct mpt_raid_volume *vol);
static const char *mpt_vol_state(struct mpt_raid_volume *vol);
static const char *mpt_disk_state(struct mpt_raid_disk *disk);
static void mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
    const char *fmt, ...);
static void mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
    const char *fmt, ...);

static int mpt_issue_raid_req(struct mpt_softc *mpt,
    struct mpt_raid_volume *vol, struct mpt_raid_disk *disk, request_t *req,
    u_int Action, uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
    int write, int wait);

static int mpt_refresh_raid_data(struct mpt_softc *mpt);
static void mpt_schedule_raid_refresh(struct mpt_softc *mpt);

/* Reply-handler cookie assigned by mpt_register_handler() at attach. */
static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
137
138 static const char *
mpt_vol_type(struct mpt_raid_volume * vol)139 mpt_vol_type(struct mpt_raid_volume *vol)
140 {
141 switch (vol->config_page->VolumeType) {
142 case MPI_RAID_VOL_TYPE_IS:
143 return ("RAID-0");
144 case MPI_RAID_VOL_TYPE_IME:
145 return ("RAID-1E");
146 case MPI_RAID_VOL_TYPE_IM:
147 return ("RAID-1");
148 default:
149 return ("Unknown");
150 }
151 }
152
153 static const char *
mpt_vol_state(struct mpt_raid_volume * vol)154 mpt_vol_state(struct mpt_raid_volume *vol)
155 {
156 switch (vol->config_page->VolumeStatus.State) {
157 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
158 return ("Optimal");
159 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
160 return ("Degraded");
161 case MPI_RAIDVOL0_STATUS_STATE_FAILED:
162 return ("Failed");
163 default:
164 return ("Unknown");
165 }
166 }
167
168 static const char *
mpt_disk_state(struct mpt_raid_disk * disk)169 mpt_disk_state(struct mpt_raid_disk *disk)
170 {
171 switch (disk->config_page.PhysDiskStatus.State) {
172 case MPI_PHYSDISK0_STATUS_ONLINE:
173 return ("Online");
174 case MPI_PHYSDISK0_STATUS_MISSING:
175 return ("Missing");
176 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
177 return ("Incompatible");
178 case MPI_PHYSDISK0_STATUS_FAILED:
179 return ("Failed");
180 case MPI_PHYSDISK0_STATUS_INITIALIZING:
181 return ("Initializing");
182 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
183 return ("Offline Requested");
184 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
185 return ("Failed per Host Request");
186 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
187 return ("Offline");
188 default:
189 return ("Unknown");
190 }
191 }
192
/*
 * printf-style message prefixed with the unit name and the volume's
 * index, bus and ID.  The volume index is derived from the element's
 * offset within the softc's raid_volumes array.
 */
static void
mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
	    const char *fmt, ...)
{
	va_list ap;

	printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
206
/*
 * printf-style message prefixed with the disk's identity: its owning
 * volume ID and member number when it belongs to a volume, otherwise
 * its raw bus:target address.
 */
static void
mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
	     const char *fmt, ...)
{
	va_list ap;

	if (disk->volume != NULL) {
		printf("(%s:vol%d:%d): ",
		       device_get_nameunit(mpt->dev),
		       disk->volume->config_page->VolumeID,
		       disk->member_number);
	} else {
		printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
		       disk->config_page.PhysDiskBus,
		       disk->config_page.PhysDiskID);
	}
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
227
/*
 * CAM async event callback.  On AC_FOUND_DEVICE, locate the active
 * RAID volume matching the new device's target ID and clamp its CAM
 * queue depth to the driver's configured RAID depth.
 */
static void
mpt_raid_async(void *callback_arg, u_int32_t code,
	       struct cam_path *path, void *arg)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc*)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		struct mpt_raid_volume *mpt_vol;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL) {
			break;
		}

		mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
			 cgd->ccb_h.target_id);

		RAID_VOL_FOREACH(mpt, mpt_vol) {
			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
				continue;

			if (mpt_vol->config_page->VolumeID
			 == cgd->ccb_h.target_id) {
				mpt_adjust_queue_depth(mpt, mpt_vol, path);
				break;
			}
		}
	}
	/* FALLTHROUGH - harmless; default case is empty */
	default:
		break;
	}
}
264
265 static int
mpt_raid_probe(struct mpt_softc * mpt)266 mpt_raid_probe(struct mpt_softc *mpt)
267 {
268
269 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
270 return (ENODEV);
271 }
272 return (0);
273 }
274
275 static int
mpt_raid_attach(struct mpt_softc * mpt)276 mpt_raid_attach(struct mpt_softc *mpt)
277 {
278 struct ccb_setasync csa;
279 mpt_handler_t handler;
280 int error;
281
282 mpt_callout_init(mpt, &mpt->raid_timer);
283
284 error = mpt_spawn_raid_thread(mpt);
285 if (error != 0) {
286 mpt_prt(mpt, "Unable to spawn RAID thread!\n");
287 goto cleanup;
288 }
289
290 MPT_LOCK(mpt);
291 handler.reply_handler = mpt_raid_reply_handler;
292 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
293 &raid_handler_id);
294 if (error != 0) {
295 mpt_prt(mpt, "Unable to register RAID haandler!\n");
296 goto cleanup;
297 }
298
299 xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
300 csa.ccb_h.func_code = XPT_SASYNC_CB;
301 csa.event_enable = AC_FOUND_DEVICE;
302 csa.callback = mpt_raid_async;
303 csa.callback_arg = mpt;
304 xpt_action((union ccb *)&csa);
305 if (csa.ccb_h.status != CAM_REQ_CMP) {
306 mpt_prt(mpt, "mpt_raid_attach: Unable to register "
307 "CAM async handler.\n");
308 }
309 MPT_UNLOCK(mpt);
310
311 mpt_raid_sysctl_attach(mpt);
312 return (0);
313 cleanup:
314 MPT_UNLOCK(mpt);
315 mpt_raid_detach(mpt);
316 return (error);
317 }
318
/*
 * Personality enable hook.  RAID support needs no work beyond what
 * attach already did, so this unconditionally succeeds.
 */
static int
mpt_raid_enable(struct mpt_softc *mpt)
{

	return (0);
}
325
/*
 * Personality detach: undo attach in reverse order - drain the refresh
 * timer, stop the RAID thread, deregister the reply handler, and
 * disable our CAM async callback.
 */
static void
mpt_raid_detach(struct mpt_softc *mpt)
{
	struct ccb_setasync csa;
	mpt_handler_t handler;

	/* Drain outside the lock; the callout may be running. */
	mpt_callout_drain(mpt, &mpt->raid_timer);

	MPT_LOCK(mpt);
	mpt_terminate_raid_thread(mpt);
	handler.reply_handler = mpt_raid_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       raid_handler_id);
	/* event_enable == 0 removes our async registration. */
	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = mpt_raid_async;
	csa.callback_arg = mpt;
	xpt_action((union ccb *)&csa);
	MPT_UNLOCK(mpt);
}
347
/*
 * IOC reset hook.  No RAID-specific state needs rebuilding after a
 * controller reset yet.
 */
static void
mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
{

	/* Nothing to do yet. */
}
354
/*
 * Event descriptions indexed by the MPI_EVENT_RAID_RC_* reason code
 * carried in an integrated-RAID event notification.
 */
static const char *raid_event_txt[] =
{
	"Volume Created",
	"Volume Deleted",
	"Volume Settings Changed",
	"Volume Status Changed",
	"Volume Physical Disk Membership Changed",
	"Physical Disk Created",
	"Physical Disk Deleted",
	"Physical Disk Settings Changed",
	"Physical Disk Status Changed",
	"Domain Validation Required",
	"SMART Data Received",
	"Replace Action Started",
};
370
/*
 * Handle an integrated-RAID async event from the IOC.  Resolves the
 * event to a known volume and/or physical disk, marks stale cached
 * state for refresh, optionally logs the event, and wakes the RAID
 * thread to rescan.  Returns 0 for events we do not own, 1 otherwise.
 */
static int
mpt_raid_event(struct mpt_softc *mpt, request_t *req,
	       MSG_EVENT_NOTIFY_REPLY *msg)
{
	EVENT_DATA_RAID *raid_event;
	struct mpt_raid_volume *mpt_vol;
	struct mpt_raid_disk *mpt_disk;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int i;
	int print_event;

	if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
		return (0);
	}

	raid_event = (EVENT_DATA_RAID *)&msg->Data;

	/* Match the event's bus/ID against our active volumes. */
	mpt_vol = NULL;
	vol_pg = NULL;
	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
			mpt_vol = &mpt->raid_volumes[i];
			vol_pg = mpt_vol->config_page;

			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
				continue;

			if (vol_pg->VolumeID == raid_event->VolumeID
			 && vol_pg->VolumeBus == raid_event->VolumeBus)
				break;
		}
		if (i >= mpt->ioc_page2->MaxVolumes) {
			mpt_vol = NULL;
			vol_pg = NULL;
		}
	}

	/* PhysDiskNum of 0xFF means no physical disk is referenced. */
	mpt_disk = NULL;
	if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
		mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
			mpt_disk = NULL;
		}
	}

	/* Invalidate cached state per reason code; most trigger a rescan. */
	print_event = 1;
	switch(raid_event->ReasonCode) {
	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
		break;
	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
		if (mpt_vol != NULL) {
			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
			} else {
				/*
				 * Coalesce status messages into one
				 * per background run of our RAID thread.
				 * This removes "spurious" status messages
				 * from our output.
				 */
				print_event = 0;
			}
		}
		break;
	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
		mpt->raid_rescan++;
		if (mpt_vol != NULL) {
			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
		}
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
		mpt->raid_rescan++;
		if (mpt_disk != NULL) {
			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
		}
		break;
	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_SMART_DATA:
	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
		break;
	}

	if (print_event) {
		/* Prefer the most specific identity we resolved. */
		if (mpt_disk != NULL) {
			mpt_disk_prt(mpt, mpt_disk, "");
		} else if (mpt_vol != NULL) {
			mpt_vol_prt(mpt, mpt_vol, "");
		} else {
			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
				raid_event->VolumeID);

			if (raid_event->PhysDiskNum != 0xFF)
				mpt_prtc(mpt, ":%d): ",
					 raid_event->PhysDiskNum);
			else
				mpt_prtc(mpt, "): ");
		}

		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
				 raid_event->ReasonCode);
		else
			mpt_prtc(mpt, "%s\n",
				 raid_event_txt[raid_event->ReasonCode]);
	}

	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
		/* XXX Use CAM's print sense for this... */
		if (mpt_disk != NULL)
			mpt_disk_prt(mpt, mpt_disk, "");
		else
			mpt_prt(mpt, "Volume(%d:%d:%d: ",
			    raid_event->VolumeBus, raid_event->VolumeID,
			    raid_event->PhysDiskNum);
		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
			 raid_event->ASC, raid_event->ASCQ);
	}

	mpt_raid_wakeup(mpt);
	return (1);
}
501
502 static void
mpt_raid_shutdown(struct mpt_softc * mpt)503 mpt_raid_shutdown(struct mpt_softc *mpt)
504 {
505 struct mpt_raid_volume *mpt_vol;
506
507 if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
508 return;
509 }
510
511 mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
512 RAID_VOL_FOREACH(mpt, mpt_vol) {
513 mpt_verify_mwce(mpt, mpt_vol);
514 }
515 }
516
/*
 * Completion handler for RAID action requests.  Parses the reply (if
 * any), marks the request done, and either wakes a sleeping waiter or
 * frees the request.  Returns TRUE to indicate the reply was consumed.
 */
static int
mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int free_req;

	if (req == NULL)
		return (TRUE);

	free_req = TRUE;
	if (reply_frame != NULL)
		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
#ifdef NOTYET
	else if (req->ccb != NULL) {
		/* Complete Quiesce CCB with error... */
	}
#endif

	/* State transition must precede the wakeup so waiters see DONE. */
	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		/* A waiter owns the request; it will free it. */
		wakeup(req);
	} else if (free_req) {
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}
547
548 /*
549 * Parse additional completion information in the reply
550 * frame for RAID I/O requests.
551 */
552 static int
mpt_raid_reply_frame_handler(struct mpt_softc * mpt,request_t * req,MSG_DEFAULT_REPLY * reply_frame)553 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
554 MSG_DEFAULT_REPLY *reply_frame)
555 {
556 MSG_RAID_ACTION_REPLY *reply;
557 struct mpt_raid_action_result *action_result;
558 MSG_RAID_ACTION_REQUEST *rap;
559
560 reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
561 req->IOCStatus = le16toh(reply->IOCStatus);
562 rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
563
564 switch (rap->Action) {
565 case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
566 mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
567 break;
568 case MPI_RAID_ACTION_ENABLE_PHYS_IO:
569 mpt_prt(mpt, "ENABLY PHYSIO DONE\n");
570 break;
571 default:
572 break;
573 }
574 action_result = REQ_TO_RAID_ACTION_RESULT(req);
575 memcpy(&action_result->action_data, &reply->ActionData,
576 sizeof(action_result->action_data));
577 action_result->action_status = le16toh(reply->ActionStatus);
578 return (TRUE);
579 }
580
581 /*
582 * Utiltity routine to perform a RAID action command;
583 */
584 static int
mpt_issue_raid_req(struct mpt_softc * mpt,struct mpt_raid_volume * vol,struct mpt_raid_disk * disk,request_t * req,u_int Action,uint32_t ActionDataWord,bus_addr_t addr,bus_size_t len,int write,int wait)585 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
586 struct mpt_raid_disk *disk, request_t *req, u_int Action,
587 uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
588 int write, int wait)
589 {
590 MSG_RAID_ACTION_REQUEST *rap;
591 SGE_SIMPLE32 *se;
592
593 rap = req->req_vbuf;
594 memset(rap, 0, sizeof *rap);
595 rap->Action = Action;
596 rap->ActionDataWord = htole32(ActionDataWord);
597 rap->Function = MPI_FUNCTION_RAID_ACTION;
598 rap->VolumeID = vol->config_page->VolumeID;
599 rap->VolumeBus = vol->config_page->VolumeBus;
600 if (disk != NULL)
601 rap->PhysDiskNum = disk->config_page.PhysDiskNum;
602 else
603 rap->PhysDiskNum = 0xFF;
604 se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
605 se->Address = htole32(addr);
606 MPI_pSGE_SET_LENGTH(se, len);
607 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
608 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
609 MPI_SGE_FLAGS_END_OF_LIST |
610 (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
611 se->FlagsLength = htole32(se->FlagsLength);
612 rap->MsgContext = htole32(req->index | raid_handler_id);
613
614 mpt_check_doorbell(mpt);
615 mpt_send_cmd(mpt, req);
616
617 if (wait) {
618 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
619 /*sleep_ok*/FALSE, /*time_ms*/2000));
620 } else {
621 return (0);
622 }
623 }
624
625 /*************************** RAID Status Monitoring ***************************/
/*
 * Create the RAID monitoring kernel process.  The physical-disk SIM
 * queue is frozen until the thread's first successful data refresh
 * (released in mpt_raid_thread), and thawed here if creation fails.
 */
static int
mpt_spawn_raid_thread(struct mpt_softc *mpt)
{
	int error;

	/*
	 * Freeze out any CAM transactions until our thread
	 * is able to run at least once.  We need to update
	 * our RAID pages before accepting I/O or we may
	 * reject I/O to an ID we later determine is for a
	 * hidden physdisk.
	 */
	MPT_LOCK(mpt);
	xpt_freeze_simq(mpt->phydisk_sim, 1);
	MPT_UNLOCK(mpt);
	error = kproc_create(mpt_raid_thread, mpt,
	    &mpt->raid_thread, /*flags*/0, /*altstack*/0,
	    "mpt_raid%d", mpt->unit);
	if (error != 0) {
		MPT_LOCK(mpt);
		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
		MPT_UNLOCK(mpt);
	}
	return (error);
}
651
/*
 * Ask the RAID thread to exit and wait for it to do so.  Called with
 * the softc lock held; the thread clears mpt->raid_thread and wakes
 * us on that address as it exits.
 */
static void
mpt_terminate_raid_thread(struct mpt_softc *mpt)
{

	if (mpt->raid_thread == NULL) {
		return;
	}
	mpt->shutdwn_raid = 1;
	wakeup(&mpt->raid_volumes);
	/*
	 * Sleep on a slightly different location
	 * for this interlock just for added safety.
	 */
	mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
}
667
/*
 * RAID monitoring thread body.  Sleeps until woken (event arrival or
 * timer), refreshes cached RAID configuration data, releases the
 * phys-disk SIM queue after the first successful refresh, and issues
 * a CAM bus rescan when events requested one.  Exits when
 * mpt->shutdwn_raid is set by mpt_terminate_raid_thread().
 */
static void
mpt_raid_thread(void *arg)
{
	struct mpt_softc *mpt;
	int firstrun;

	mpt = (struct mpt_softc *)arg;
	firstrun = 1;
	MPT_LOCK(mpt);
	while (mpt->shutdwn_raid == 0) {
		if (mpt->raid_wakeup == 0) {
			mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
			continue;
		}

		mpt->raid_wakeup = 0;

		if (mpt_refresh_raid_data(mpt)) {
			mpt_schedule_raid_refresh(mpt);	/* XX NOT QUITE RIGHT */
			continue;
		}

		/*
		 * Now that we have our first snapshot of RAID data,
		 * allow CAM to access our physical disk bus.
		 */
		if (firstrun) {
			firstrun = 0;
			xpt_release_simq(mpt->phydisk_sim, TRUE);
		}

		if (mpt->raid_rescan != 0) {
			union ccb *ccb;
			int error;

			mpt->raid_rescan = 0;
			/* Drop the lock: xpt_alloc_ccb may sleep. */
			MPT_UNLOCK(mpt);

			ccb = xpt_alloc_ccb();

			MPT_LOCK(mpt);
			error = xpt_create_path(&ccb->ccb_h.path, NULL,
			    cam_sim_path(mpt->phydisk_sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
			if (error != CAM_REQ_CMP) {
				xpt_free_ccb(ccb);
				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
			} else {
				/* xpt_rescan takes ownership of the ccb. */
				xpt_rescan(ccb);
			}
		}
	}
	/* Signal mpt_terminate_raid_thread() that we are gone. */
	mpt->raid_thread = NULL;
	wakeup(&mpt->raid_thread);
	MPT_UNLOCK(mpt);
	kproc_exit(0);
}
725
#if 0
/*
 * Disabled (#if 0): skeleton support for quiescing physical I/O to a
 * member disk before a RAID action.  Never compiled; retained as a
 * reference.  NOTE(review): the inner disabled section references an
 * undeclared `ar` and would need work before being enabled.
 */
static void
mpt_raid_quiesce_timeout(void *arg)
{

	/* Complete the CCB with error */
	/* COWWWW */
}

static timeout_t mpt_raid_quiesce_timeout;
cam_status
mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
		      request_t *req)
{
	union ccb *ccb;

	ccb = req->ccb;
	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
		return (CAM_REQ_CMP);

	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
		int rv;

		mpt_disk->flags |= MPT_RDF_QUIESCING;
		xpt_freeze_devq(ccb->ccb_h.path, 1);

		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
					/*ActionData*/0, /*addr*/0,
					/*len*/0, /*write*/FALSE,
					/*wait*/FALSE);
		if (rv != 0)
			return (CAM_REQ_CMP_ERR);

		mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
#if 0
		if (rv == ETIMEDOUT) {
			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
				     "Quiece Timed-out\n");
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
				     "%d:%x:%x\n", rv, req->IOCStatus,
				     ar->action_status);
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}
#endif
		return (CAM_REQ_INPROG);
	}
	return (CAM_REQUEUE_REQ);
}
#endif
785
786 /* XXX Ignores that there may be multiple buses/IOCs involved. */
787 cam_status
mpt_map_physdisk(struct mpt_softc * mpt,union ccb * ccb,target_id_t * tgt)788 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, target_id_t *tgt)
789 {
790 struct mpt_raid_disk *mpt_disk;
791
792 mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
793 if (ccb->ccb_h.target_id < mpt->raid_max_disks
794 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
795 *tgt = mpt_disk->config_page.PhysDiskID;
796 return (0);
797 }
798 mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
799 ccb->ccb_h.target_id);
800 return (-1);
801 }
802
803 /* XXX Ignores that there may be multiple buses/IOCs involved. */
804 int
mpt_is_raid_member(struct mpt_softc * mpt,target_id_t tgt)805 mpt_is_raid_member(struct mpt_softc *mpt, target_id_t tgt)
806 {
807 struct mpt_raid_disk *mpt_disk;
808 int i;
809
810 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0)
811 return (0);
812 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
813 mpt_disk = &mpt->raid_disks[i];
814 if ((mpt_disk->flags & MPT_RDF_ACTIVE) != 0 &&
815 mpt_disk->config_page.PhysDiskID == tgt)
816 return (1);
817 }
818 return (0);
819
820 }
821
822 /* XXX Ignores that there may be multiple buses/IOCs involved. */
823 int
mpt_is_raid_volume(struct mpt_softc * mpt,target_id_t tgt)824 mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt)
825 {
826 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
827 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
828
829 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
830 return (0);
831 }
832 ioc_vol = mpt->ioc_page2->RaidVolume;
833 ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
834 for (;ioc_vol != ioc_last_vol; ioc_vol++) {
835 if (ioc_vol->VolumeID == tgt) {
836 return (1);
837 }
838 }
839 return (0);
840 }
841
#if 0
/*
 * Disabled (#if 0): enable or disable a RAID volume via a RAID action.
 * Never compiled.  NOTE(review): the ETIMEDOUT path returns without
 * mpt_free_request(), apparently leaking the request - confirm before
 * enabling this code.
 */
static void
mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
	       int enable)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int enabled;
	int rv;

	vol_pg = mpt_vol->config_page;
	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	if ((enabled && enable)
	 || (!enabled && !enable))
		return;

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_enable_vol: Get request failed!\n");
		return;
	}

	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
				       : MPI_RAID_ACTION_DISABLE_VOLUME,
				/*data*/0, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
			    "%s Volume Timed-out\n",
			    enable ? "Enable" : "Disable");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
			    enable ? "Enable" : "Disable",
			    rv, req->IOCStatus, ar->action_status);
	}

	mpt_free_request(mpt, req);
}
#endif
894
/*
 * Reconcile a volume's member write-cache-enable (MWCE) setting with
 * the driver's configured policy, issuing a CHANGE_VOLUME_SETTINGS
 * RAID action when they differ.  The cached settings word is toggled
 * before building the request payload and toggled back, so the cache
 * is only committed to the new value on confirmed success.
 *
 * NOTE(review): the ETIMEDOUT path returns without freeing `req` -
 * presumably deliberate since the request may still complete later;
 * confirm against the request lifecycle rules in mpt.c.
 */
static void
mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	uint32_t data;
	int rv;
	int resyncing;
	int mwce;

	vol_pg = mpt_vol->config_page;
	resyncing = vol_pg->VolumeStatus.Flags
		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	mwce = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	switch (mpt->raid_mwce_setting) {
	case MPT_RAID_MWCE_REBUILD_ONLY:
		if ((resyncing && mwce) || (!resyncing && !mwce)) {
			return;
		}
		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
			/*
			 * Wait one more status update to see if
			 * resyncing gets enabled.  It gets disabled
			 * temporarilly when WCE is changed.
			 */
			return;
		}
		break;
	case MPT_RAID_MWCE_ON:
		if (mwce)
			return;
		break;
	case MPT_RAID_MWCE_OFF:
		if (!mwce)
			return;
		break;
	case MPT_RAID_MWCE_NC:
		return;
	}

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_verify_mwce: Get request failed!\n");
		return;
	}

	/* Toggle, snapshot the desired settings word, toggle back. */
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
				data, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
			    "Write Cache Enable Timed-out\n");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
			    "%d:%x:%x\n", rv, req->IOCStatus,
			    ar->action_status);
	} else {
		/* Success: commit the new setting to our cached page. */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	}
	mpt_free_request(mpt, req);
}
977
/*
 * Reconcile a volume's resync rate and resync-priority setting with
 * the driver's configured rate.  A non-matching explicit rate is set
 * via SET_RESYNC_RATE; otherwise the high/low priority bit (rate >=
 * 128 means high) is adjusted via CHANGE_VOLUME_SETTINGS.
 *
 * NOTE(review): like mpt_verify_mwce(), the ETIMEDOUT paths return
 * without freeing `req` - presumably because the request may still
 * complete; confirm against the request lifecycle rules.
 */
static void
mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	u_int prio;
	int rv;

	vol_pg = mpt_vol->config_page;

	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
		return;

	/*
	 * If the current RAID resync rate does not
	 * match our configured rate, update it.
	 */
	prio = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
	if (vol_pg->ResyncRate != 0
	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {
		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_SET_RESYNC_RATE,
					mpt->raid_resync_rate, /*addr*/0,
					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else
			vol_pg->ResyncRate = mpt->raid_resync_rate;
		mpt_free_request(mpt, req);
	} else if ((prio && mpt->raid_resync_rate < 128)
		|| (!prio && mpt->raid_resync_rate >= 128)) {
		uint32_t data;

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		/* Toggle, snapshot the desired settings word, toggle back. */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
					data, /*addr*/0, /*len*/0,
					/*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}
		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else {
			/* Success: commit the new priority to our cache. */
			vol_pg->VolumeSettings.Settings ^=
			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		}

		mpt_free_request(mpt, req);
	}
}
1067
1068 static void
mpt_adjust_queue_depth(struct mpt_softc * mpt,struct mpt_raid_volume * mpt_vol,struct cam_path * path)1069 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1070 struct cam_path *path)
1071 {
1072 struct ccb_relsim crs;
1073
1074 xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1075 crs.ccb_h.func_code = XPT_REL_SIMQ;
1076 crs.ccb_h.flags = CAM_DEV_QFREEZE;
1077 crs.release_flags = RELSIM_ADJUST_OPENINGS;
1078 crs.openings = mpt->raid_queue_depth;
1079 xpt_action((union ccb *)&crs);
1080 if (crs.ccb_h.status != CAM_REQ_CMP)
1081 mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1082 "with CAM status %#x\n", crs.ccb_h.status);
1083 }
1084
/*
 * Print a console summary of a RAID volume: the volume settings that
 * are enabled, its hot spare pool membership, and the identity and
 * state of each member disk.
 */
static void
mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	u_int i;

	vol_pg = mpt_vol->config_page;
	mpt_vol_prt(mpt, mpt_vol, "Settings (");
	/* Walk each settings bit in ascending order, naming the set ones. */
	for (i = 1; i <= 0x8000; i <<= 1) {
		switch (vol_pg->VolumeSettings.Settings & i) {
		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
			mpt_prtc(mpt, " Member-WCE");
			break;
		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
			mpt_prtc(mpt, " Offline-On-SMART-Err");
			break;
		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
			mpt_prtc(mpt, " Hot-Plug-Spares");
			break;
		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
			mpt_prtc(mpt, " High-Priority-ReSync");
			break;
		default:
			break;
		}
	}
	mpt_prtc(mpt, " )\n");
	if (vol_pg->VolumeSettings.HotSparePool != 0) {
		/* HotSparePool is a bitmask; pluralize unless one bit set. */
		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
		    powerof2(vol_pg->VolumeSettings.HotSparePool)
		    ? ":" : "s:");
		for (i = 0; i < 8; i++) {
			u_int mask;

			mask = 0x1 << i;
			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
				continue;
			mpt_prtc(mpt, " %d", i);
		}
		mpt_prtc(mpt, "\n");
	}
	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
	/* Describe each member disk: address, role, and current state. */
	for (i = 0; i < vol_pg->NumPhysDisks; i++){
		struct mpt_raid_disk *mpt_disk;
		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
		int pt_bus = cam_sim_bus(mpt->phydisk_sim);
		U8 f, s;

		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		disk_pg = &mpt_disk->config_page;
		mpt_prtc(mpt, "      ");
		mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
		    pt_bus, disk_pg->PhysDiskID);
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			/* Mirrors have a primary/secondary relationship. */
			mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
			    "Primary" : "Secondary");
		} else {
			mpt_prtc(mpt, "Stripe Position %d",
			    mpt_disk->member_number);
		}
		f = disk_pg->PhysDiskStatus.Flags;
		s = disk_pg->PhysDiskStatus.State;
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
			mpt_prtc(mpt, " Out of Sync");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
			mpt_prtc(mpt, " Quiesced");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
			mpt_prtc(mpt, " Inactive");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Optimal");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Non-Optimal");
		}
		switch (s) {
		case MPI_PHYSDISK0_STATUS_ONLINE:
			mpt_prtc(mpt, " Online");
			break;
		case MPI_PHYSDISK0_STATUS_MISSING:
			mpt_prtc(mpt, " Missing");
			break;
		case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
			mpt_prtc(mpt, " Incompatible");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED:
			mpt_prtc(mpt, " Failed");
			break;
		case MPI_PHYSDISK0_STATUS_INITIALIZING:
			mpt_prtc(mpt, " Initializing");
			break;
		case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
			mpt_prtc(mpt, " Requested Offline");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
			mpt_prtc(mpt, " Requested Failed");
			break;
		case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
		default:
			mpt_prtc(mpt, " Offline Other (%x)", s);
			break;
		}
		mpt_prtc(mpt, "\n");
	}
}
1192
1193 static void
mpt_announce_disk(struct mpt_softc * mpt,struct mpt_raid_disk * mpt_disk)1194 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1195 {
1196 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1197 int rd_bus = cam_sim_bus(mpt->sim);
1198 int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1199 u_int i;
1200
1201 disk_pg = &mpt_disk->config_page;
1202 mpt_disk_prt(mpt, mpt_disk,
1203 "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1204 device_get_nameunit(mpt->dev), rd_bus,
1205 disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1206 pt_bus, mpt_disk - mpt->raid_disks);
1207 if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1208 return;
1209 mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1210 powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1211 ? ":" : "s:");
1212 for (i = 0; i < 8; i++) {
1213 u_int mask;
1214
1215 mask = 0x1 << i;
1216 if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1217 continue;
1218 mpt_prtc(mpt, " %d", i);
1219 }
1220 mpt_prtc(mpt, "\n");
1221 }
1222
1223 static void
mpt_refresh_raid_disk(struct mpt_softc * mpt,struct mpt_raid_disk * mpt_disk,IOC_3_PHYS_DISK * ioc_disk)1224 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1225 IOC_3_PHYS_DISK *ioc_disk)
1226 {
1227 int rv;
1228
1229 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1230 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1231 &mpt_disk->config_page.Header,
1232 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1233 if (rv != 0) {
1234 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1235 "Failed to read RAID Disk Hdr(%d)\n",
1236 ioc_disk->PhysDiskNum);
1237 return;
1238 }
1239 rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1240 &mpt_disk->config_page.Header,
1241 sizeof(mpt_disk->config_page),
1242 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1243 if (rv != 0)
1244 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1245 "Failed to read RAID Disk Page(%d)\n",
1246 ioc_disk->PhysDiskNum);
1247 mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
1248 }
1249
/*
 * Re-read RAID volume page 0 for "mpt_vol" from the controller and,
 * if a resync is in progress, fetch the current resync progress
 * indicator into mpt_vol->sync_progress.  Also refreshes each member
 * disk's back-pointer and member number.
 */
static void
mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
    CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	struct mpt_raid_action_result *ar;
	request_t *req;
	int rv;
	int i;

	vol_pg = mpt_vol->config_page;
	/* The cached page is about to be re-read; mark it stale. */
	mpt_vol->flags &= ~MPT_RVF_UP2DATE;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
	    ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}

	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
	    &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}
	/* Convert the freshly read page to host byte order. */
	mpt2host_config_page_raid_vol_0(vol_pg);

	mpt_vol->flags |= MPT_RVF_ACTIVE;

	/* Update disk entry array data. */
	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
		struct mpt_raid_disk *mpt_disk;
		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		mpt_disk->volume = mpt_vol;
		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			/*
			 * NOTE(review): IM (mirror) maps appear to be
			 * 1-based, hence the decrement — confirm against
			 * the MPI spec.
			 */
			mpt_disk->member_number--;
		}
	}

	/* Only fetch progress data while a resync is actually running. */
	if ((vol_pg->VolumeStatus.Flags
	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
		return;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Get request failed!\n");
		return;
	}
	rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
	    MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
		mpt_free_request(mpt, req);
		return;
	}

	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv == 0
	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
		memcpy(&mpt_vol->sync_progress,
		    &ar->action_data.indicator_struct,
		    sizeof(mpt_vol->sync_progress));
		/* Indicator arrives in MPI byte order; swap it too. */
		mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
	} else {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
	}
	mpt_free_request(mpt, req);
}
1328
/*
 * Update in-core information about RAID support.  We update any entries
 * that didn't previously exist or have been marked as needing to
 * be updated by our event handler.  Interesting changes are displayed
 * to the console.
 */
1335 static int
mpt_refresh_raid_data(struct mpt_softc * mpt)1336 mpt_refresh_raid_data(struct mpt_softc *mpt)
1337 {
1338 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1339 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1340 IOC_3_PHYS_DISK *ioc_disk;
1341 IOC_3_PHYS_DISK *ioc_last_disk;
1342 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1343 size_t len;
1344 int rv;
1345 int i;
1346 u_int nonopt_volumes;
1347
1348 if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1349 return (0);
1350 }
1351
1352 /*
1353 * Mark all items as unreferenced by the configuration.
1354 * This allows us to find, report, and discard stale
1355 * entries.
1356 */
1357 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1358 mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1359 }
1360 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1361 mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1362 }
1363
1364 /*
1365 * Get Physical Disk information.
1366 */
1367 len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1368 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1369 &mpt->ioc_page3->Header, len,
1370 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1371 if (rv) {
1372 mpt_prt(mpt,
1373 "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1374 return (-1);
1375 }
1376 mpt2host_config_page_ioc3(mpt->ioc_page3);
1377
1378 ioc_disk = mpt->ioc_page3->PhysDisk;
1379 ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1380 for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1381 struct mpt_raid_disk *mpt_disk;
1382
1383 mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1384 mpt_disk->flags |= MPT_RDF_REFERENCED;
1385 if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1386 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1387 mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1388 }
1389 mpt_disk->flags |= MPT_RDF_ACTIVE;
1390 mpt->raid_rescan++;
1391 }
1392
1393 /*
1394 * Refresh volume data.
1395 */
1396 len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1397 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1398 &mpt->ioc_page2->Header, len,
1399 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1400 if (rv) {
1401 mpt_prt(mpt, "mpt_refresh_raid_data: "
1402 "Failed to read IOC Page 2\n");
1403 return (-1);
1404 }
1405 mpt2host_config_page_ioc2(mpt->ioc_page2);
1406
1407 ioc_vol = mpt->ioc_page2->RaidVolume;
1408 ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1409 for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1410 struct mpt_raid_volume *mpt_vol;
1411
1412 mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1413 mpt_vol->flags |= MPT_RVF_REFERENCED;
1414 vol_pg = mpt_vol->config_page;
1415 if (vol_pg == NULL)
1416 continue;
1417 if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1418 != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1419 || (vol_pg->VolumeStatus.Flags
1420 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1421 mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1422 }
1423 mpt_vol->flags |= MPT_RVF_ACTIVE;
1424 }
1425
1426 nonopt_volumes = 0;
1427 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1428 struct mpt_raid_volume *mpt_vol;
1429 uint64_t total;
1430 uint64_t left;
1431 int m;
1432 u_int prio;
1433
1434 mpt_vol = &mpt->raid_volumes[i];
1435
1436 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1437 continue;
1438 }
1439
1440 vol_pg = mpt_vol->config_page;
1441 if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1442 == MPT_RVF_ANNOUNCED) {
1443 mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1444 mpt_vol->flags = 0;
1445 continue;
1446 }
1447
1448 if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1449 mpt_announce_vol(mpt, mpt_vol);
1450 mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1451 }
1452
1453 if (vol_pg->VolumeStatus.State !=
1454 MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1455 nonopt_volumes++;
1456
1457 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1458 continue;
1459
1460 mpt_vol->flags |= MPT_RVF_UP2DATE;
1461 mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1462 mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1463 mpt_verify_mwce(mpt, mpt_vol);
1464
1465 if (vol_pg->VolumeStatus.Flags == 0) {
1466 continue;
1467 }
1468
1469 mpt_vol_prt(mpt, mpt_vol, "Status (");
1470 for (m = 1; m <= 0x80; m <<= 1) {
1471 switch (vol_pg->VolumeStatus.Flags & m) {
1472 case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1473 mpt_prtc(mpt, " Enabled");
1474 break;
1475 case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1476 mpt_prtc(mpt, " Quiesced");
1477 break;
1478 case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1479 mpt_prtc(mpt, " Re-Syncing");
1480 break;
1481 case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1482 mpt_prtc(mpt, " Inactive");
1483 break;
1484 default:
1485 break;
1486 }
1487 }
1488 mpt_prtc(mpt, " )\n");
1489
1490 if ((vol_pg->VolumeStatus.Flags
1491 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1492 continue;
1493
1494 mpt_verify_resync_rate(mpt, mpt_vol);
1495
1496 left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1497 total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1498 if (vol_pg->ResyncRate != 0) {
1499 prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1500 mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1501 prio / 1000, prio % 1000);
1502 } else {
1503 prio = vol_pg->VolumeSettings.Settings
1504 & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1505 mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1506 prio ? "High" : "Low");
1507 }
1508 mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1509 "blocks remaining\n", (uintmax_t)left,
1510 (uintmax_t)total);
1511
1512 /* Periodically report on sync progress. */
1513 mpt_schedule_raid_refresh(mpt);
1514 }
1515
1516 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1517 struct mpt_raid_disk *mpt_disk;
1518 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1519 int m;
1520
1521 mpt_disk = &mpt->raid_disks[i];
1522 disk_pg = &mpt_disk->config_page;
1523
1524 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1525 continue;
1526
1527 if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1528 == MPT_RDF_ANNOUNCED) {
1529 mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1530 mpt_disk->flags = 0;
1531 mpt->raid_rescan++;
1532 continue;
1533 }
1534
1535 if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1536 mpt_announce_disk(mpt, mpt_disk);
1537 mpt_disk->flags |= MPT_RVF_ANNOUNCED;
1538 }
1539
1540 if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1541 continue;
1542
1543 mpt_disk->flags |= MPT_RDF_UP2DATE;
1544 mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1545 if (disk_pg->PhysDiskStatus.Flags == 0)
1546 continue;
1547
1548 mpt_disk_prt(mpt, mpt_disk, "Status (");
1549 for (m = 1; m <= 0x80; m <<= 1) {
1550 switch (disk_pg->PhysDiskStatus.Flags & m) {
1551 case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1552 mpt_prtc(mpt, " Out-Of-Sync");
1553 break;
1554 case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1555 mpt_prtc(mpt, " Quiesced");
1556 break;
1557 default:
1558 break;
1559 }
1560 }
1561 mpt_prtc(mpt, " )\n");
1562 }
1563
1564 mpt->raid_nonopt_volumes = nonopt_volumes;
1565 return (0);
1566 }
1567
/*
 * Callout handler for the RAID status timer; wakes the RAID thread so
 * it can refresh and report status.
 */
static void
mpt_raid_timer(void *arg)
{
	struct mpt_softc *mpt = arg;

	MPT_LOCK_ASSERT(mpt);
	mpt_raid_wakeup(mpt);
}
1577
/*
 * Arm the RAID status timer so mpt_raid_timer() fires after
 * MPT_RAID_SYNC_REPORT_INTERVAL ticks, triggering another refresh.
 */
static void
mpt_schedule_raid_refresh(struct mpt_softc *mpt)
{

	callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
	    mpt_raid_timer, mpt);
}
1585
1586 void
mpt_raid_free_mem(struct mpt_softc * mpt)1587 mpt_raid_free_mem(struct mpt_softc *mpt)
1588 {
1589
1590 if (mpt->raid_volumes) {
1591 struct mpt_raid_volume *mpt_raid;
1592 int i;
1593 for (i = 0; i < mpt->raid_max_volumes; i++) {
1594 mpt_raid = &mpt->raid_volumes[i];
1595 if (mpt_raid->config_page) {
1596 free(mpt_raid->config_page, M_DEVBUF);
1597 mpt_raid->config_page = NULL;
1598 }
1599 }
1600 free(mpt->raid_volumes, M_DEVBUF);
1601 mpt->raid_volumes = NULL;
1602 }
1603 if (mpt->raid_disks) {
1604 free(mpt->raid_disks, M_DEVBUF);
1605 mpt->raid_disks = NULL;
1606 }
1607 if (mpt->ioc_page2) {
1608 free(mpt->ioc_page2, M_DEVBUF);
1609 mpt->ioc_page2 = NULL;
1610 }
1611 if (mpt->ioc_page3) {
1612 free(mpt->ioc_page3, M_DEVBUF);
1613 mpt->ioc_page3 = NULL;
1614 }
1615 mpt->raid_max_volumes = 0;
1616 mpt->raid_max_disks = 0;
1617 }
1618
1619 static int
mpt_raid_set_vol_resync_rate(struct mpt_softc * mpt,u_int rate)1620 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1621 {
1622 struct mpt_raid_volume *mpt_vol;
1623
1624 if ((rate > MPT_RAID_RESYNC_RATE_MAX
1625 || rate < MPT_RAID_RESYNC_RATE_MIN)
1626 && rate != MPT_RAID_RESYNC_RATE_NC)
1627 return (EINVAL);
1628
1629 MPT_LOCK(mpt);
1630 mpt->raid_resync_rate = rate;
1631 RAID_VOL_FOREACH(mpt, mpt_vol) {
1632 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1633 continue;
1634 }
1635 mpt_verify_resync_rate(mpt, mpt_vol);
1636 }
1637 MPT_UNLOCK(mpt);
1638 return (0);
1639 }
1640
1641 static int
mpt_raid_set_vol_queue_depth(struct mpt_softc * mpt,u_int vol_queue_depth)1642 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1643 {
1644 struct mpt_raid_volume *mpt_vol;
1645
1646 if (vol_queue_depth > 255 || vol_queue_depth < 1)
1647 return (EINVAL);
1648
1649 MPT_LOCK(mpt);
1650 mpt->raid_queue_depth = vol_queue_depth;
1651 RAID_VOL_FOREACH(mpt, mpt_vol) {
1652 struct cam_path *path;
1653 int error;
1654
1655 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1656 continue;
1657
1658 mpt->raid_rescan = 0;
1659
1660 error = xpt_create_path(&path, NULL,
1661 cam_sim_path(mpt->sim),
1662 mpt_vol->config_page->VolumeID,
1663 /*lun*/0);
1664 if (error != CAM_REQ_CMP) {
1665 mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1666 continue;
1667 }
1668 mpt_adjust_queue_depth(mpt, mpt_vol, path);
1669 xpt_free_path(path);
1670 }
1671 MPT_UNLOCK(mpt);
1672 return (0);
1673 }
1674
1675 static int
mpt_raid_set_vol_mwce(struct mpt_softc * mpt,mpt_raid_mwce_t mwce)1676 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1677 {
1678 struct mpt_raid_volume *mpt_vol;
1679 int force_full_resync;
1680
1681 MPT_LOCK(mpt);
1682 if (mwce == mpt->raid_mwce_setting) {
1683 MPT_UNLOCK(mpt);
1684 return (0);
1685 }
1686
1687 /*
1688 * Catch MWCE being left on due to a failed shutdown. Since
1689 * sysctls cannot be set by the loader, we treat the first
1690 * setting of this varible specially and force a full volume
1691 * resync if MWCE is enabled and a resync is in progress.
1692 */
1693 force_full_resync = 0;
1694 if (mpt->raid_mwce_set == 0
1695 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1696 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1697 force_full_resync = 1;
1698
1699 mpt->raid_mwce_setting = mwce;
1700 RAID_VOL_FOREACH(mpt, mpt_vol) {
1701 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1702 int resyncing;
1703 int mwce;
1704
1705 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1706 continue;
1707
1708 vol_pg = mpt_vol->config_page;
1709 resyncing = vol_pg->VolumeStatus.Flags
1710 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1711 mwce = vol_pg->VolumeSettings.Settings
1712 & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1713 if (force_full_resync && resyncing && mwce) {
1714 /*
1715 * XXX disable/enable volume should force a resync,
1716 * but we'll need to queice, drain, and restart
1717 * I/O to do that.
1718 */
1719 mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1720 "detected. Suggest full resync.\n");
1721 }
1722 mpt_verify_mwce(mpt, mpt_vol);
1723 }
1724 mpt->raid_mwce_set = 1;
1725 MPT_UNLOCK(mpt);
1726 return (0);
1727 }
1728
/*
 * Human-readable names for the member write-cache settings, indexed
 * by mpt->raid_mwce_setting and matched by string comparison in
 * mpt_raid_sysctl_vol_member_wce().
 */
static const char *mpt_vol_mwce_strs[] =
{
	"On",
	"Off",
	"On-During-Rebuild",
	"NC"
};
1736
1737 static int
mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)1738 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1739 {
1740 char inbuf[20];
1741 struct mpt_softc *mpt;
1742 const char *str;
1743 int error;
1744 u_int size;
1745 u_int i;
1746
1747 mpt = (struct mpt_softc *)arg1;
1748 str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1749 error = SYSCTL_OUT(req, str, strlen(str) + 1);
1750 if (error || !req->newptr) {
1751 return (error);
1752 }
1753
1754 size = req->newlen - req->newidx;
1755 if (size >= sizeof(inbuf)) {
1756 return (EINVAL);
1757 }
1758
1759 error = SYSCTL_IN(req, inbuf, size);
1760 if (error) {
1761 return (error);
1762 }
1763 inbuf[size] = '\0';
1764 for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1765 if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1766 return (mpt_raid_set_vol_mwce(mpt, i));
1767 }
1768 }
1769 return (EINVAL);
1770 }
1771
1772 static int
mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)1773 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1774 {
1775 struct mpt_softc *mpt;
1776 u_int raid_resync_rate;
1777 int error;
1778
1779 mpt = (struct mpt_softc *)arg1;
1780 raid_resync_rate = mpt->raid_resync_rate;
1781
1782 error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1783 if (error || !req->newptr) {
1784 return error;
1785 }
1786
1787 return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1788 }
1789
1790 static int
mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)1791 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1792 {
1793 struct mpt_softc *mpt;
1794 u_int raid_queue_depth;
1795 int error;
1796
1797 mpt = (struct mpt_softc *)arg1;
1798 raid_queue_depth = mpt->raid_queue_depth;
1799
1800 error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1801 if (error || !req->newptr) {
1802 return error;
1803 }
1804
1805 return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1806 }
1807
1808 static void
mpt_raid_sysctl_attach(struct mpt_softc * mpt)1809 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1810 {
1811 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1812 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1813
1814 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1815 "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
1816 mpt, 0, mpt_raid_sysctl_vol_member_wce, "A",
1817 "volume member WCE(On,Off,On-During-Rebuild,NC)");
1818
1819 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1820 "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1821 mpt, 0, mpt_raid_sysctl_vol_queue_depth, "I",
1822 "default volume queue depth");
1823
1824 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1825 "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1826 mpt, 0, mpt_raid_sysctl_vol_resync_rate, "I",
1827 "volume resync priority (0 == NC, 1 - 255)");
1828 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1829 "nonoptimal_volumes", CTLFLAG_RD,
1830 &mpt->raid_nonopt_volumes, 0,
1831 "number of nonoptimal volumes");
1832 }
1833