/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2016, 2017, Intel Corporation.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 */

/*
 * ZFS syseventd module.
 *
 * file origin: openzfs/usr/src/cmd/syseventd/modules/zfs_mod/zfs_mod.c
 *
 * The purpose of this module is to identify when devices are added to the
 * system, and appropriately online or replace the affected vdevs.
 *
 * When a device is added to the system:
 *
 * 1. Search for any vdevs whose devid matches that of the newly added
 *    device.
 *
 * 2. If no vdevs are found, then search for any vdevs whose udev path
 *    matches that of the new device.
 *
 * 3. If no vdevs match by either method, then ignore the event.
 *
 * 4. Attempt to online the device with a flag to indicate that it should
 *    be unspared when resilvering completes. If this succeeds, then the
 *    same device was inserted and we should continue normally.
 *
 * 5. If the pool does not have the 'autoreplace' property set, attempt to
 *    online the device again without the unspare flag, which will
 *    generate a FMA fault.
 *
 * 6. If the pool has the 'autoreplace' property set, and the matching vdev
 *    is a whole disk, then label the new disk and attempt a 'zpool
 *    replace'.
 *
 * The module responds to EC_DEV_ADD events. The special ESC_ZFS_VDEV_CHECK
 * event indicates that a device failed to open during pool load, but the
 * autoreplace property was set. In this case, we deferred the associated
 * FMA fault until our module had a chance to process the autoreplace logic.
 * If the device could not be replaced, then the second online attempt will
 * trigger the FMA fault that we skipped earlier.
 *
 * On Linux, udev provides an add event for both the whole disk and each of
 * its partitions.
 */

#include <ctype.h>
#include <fcntl.h>
#include <libnvpair.h>
#include <libzfs.h>
#include <libzutil.h>
#include <limits.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <sys/list.h>
#include <sys/sunddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>
#include <thread_pool.h>
#include <pthread.h>
#include <unistd.h>
#include <errno.h>
#include "zfs_agents.h"
#include "../zed_log.h"

#define DEV_BYID_PATH    "/dev/disk/by-id/"
#define DEV_BYPATH_PATH  "/dev/disk/by-path/"
#define DEV_BYVDEV_PATH  "/dev/disk/by-vdev/"

typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);

libzfs_handle_t *g_zfshdl;
list_t g_pool_list;             /* list of unavailable pools at initialization */
list_t g_device_list;           /* list of disks with asynchronous label request */
tpool_t *g_tpool;
boolean_t g_enumeration_done;
pthread_t g_zfs_tid;            /* zfs_enum_pools() thread */

typedef struct unavailpool {
        zpool_handle_t *uap_zhp;
        list_node_t uap_node;
} unavailpool_t;

typedef struct pendingdev {
        char pd_physpath[128];
        list_node_t pd_node;
} pendingdev_t;

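/*
 * Return the state of the pool's top-level vdev tree, as recorded in the
 * vdev stats of the pool configuration.
 */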
static int
zfs_toplevel_state(zpool_handle_t *zhp)
{
        nvlist_t *nvroot;
        vdev_stat_t *vs;
        unsigned int c;

        verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
            ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
        verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
            (uint64_t **)&vs, &c) == 0);
        return (vs->vs_state);
}

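/*
 * zpool_iter() callback used at startup: queue any pool whose top-level
 * state is worse than DEGRADED so its datasets can be enabled later, and
 * close the handles of pools that are already usable.
 */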
static int
zfs_unavail_pool(zpool_handle_t *zhp, void *data)
{
        zed_log_msg(LOG_INFO, "zfs_unavail_pool: examining '%s' (state %d)",
            zpool_get_name(zhp), (int)zfs_toplevel_state(zhp));

        if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
                unavailpool_t *uap;
                uap = malloc(sizeof (unavailpool_t));
                uap->uap_zhp = zhp;
                list_insert_tail((list_t *)data, uap);
        } else {
                zpool_close(zhp);
        }
        return (0);
}

/*
 * Two-stage replace on Linux.
 *
 * Since we get disk notifications, we can wait for the partitioned disk
 * slice to show up.
 *
 * The first stage tags the disk, initiates async partitioning, and returns.
 * The second stage finds the tag and proceeds to the ZFS labeling/replace.
 *
 * disk-add --> label-disk + tag-disk --> partition-add --> zpool_vdev_attach
 *
 * 1. physical match with no fs and no partition:
 *      tag it, partition the disk
 *
 * 2. physical match again, now with the partition and the tag present:
 *      proceed with the replace
 */

/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system. If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk. This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished). If this succeeds, then we're done. If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected. If the 'autoreplace' property
 * is enabled, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'. If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 *
 * We can also arrive here from an ESC_ZFS_VDEV_CHECK event.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
{
        char *path;
        vdev_state_t newstate;
        nvlist_t *nvroot, *newvd;
        pendingdev_t *device;
        uint64_t wholedisk = 0ULL;
        uint64_t offline = 0ULL;
        uint64_t guid = 0ULL;
        char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
        char rawpath[PATH_MAX], fullpath[PATH_MAX];
        char devpath[PATH_MAX];
        int ret;
        boolean_t is_dm = B_FALSE;
        boolean_t is_sd = B_FALSE;
        uint_t c;
        vdev_stat_t *vs;

        if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
                return;

        /* Skip healthy disks */
        verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
            (uint64_t **)&vs, &c) == 0);
        if (vs->vs_state == VDEV_STATE_HEALTHY) {
                zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
                    __func__, path);
                return;
        }

        (void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
        (void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
            &enc_sysfs_path);
        (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
        (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
        (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);

        if (offline)
                return; /* don't intervene if it was taken offline */

        is_dm = zfs_dev_is_dm(path);
        zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
            " wholedisk %d, %s dm (guid %llu)", zpool_get_name(zhp), path,
            physpath ? physpath : "NULL", (int)wholedisk, is_dm ? "is" : "not",
            (long long unsigned int)guid);

        /*
         * The VDEV guid is preferred for identification (gets passed in path)
         */
        if (guid != 0) {
                (void) snprintf(fullpath, sizeof (fullpath), "%llu",
                    (long long unsigned int)guid);
        } else {
                /*
                 * otherwise use path sans partition suffix for whole disks
                 */
                (void) strlcpy(fullpath, path, sizeof (fullpath));
                if (wholedisk) {
                        char *spath = zfs_strip_partition(fullpath);
                        if (!spath) {
                                zed_log_msg(LOG_INFO, "%s: Can't alloc",
                                    __func__);
                                return;
                        }

                        (void) strlcpy(fullpath, spath, sizeof (fullpath));
                        free(spath);
                }
        }

        /*
         * Attempt to online the device.
         */
        if (zpool_vdev_online(zhp, fullpath,
            ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
            (newstate == VDEV_STATE_HEALTHY ||
            newstate == VDEV_STATE_DEGRADED)) {
                zed_log_msg(LOG_INFO, " zpool_vdev_online: vdev %s is %s",
                    fullpath, (newstate == VDEV_STATE_HEALTHY) ?
                    "HEALTHY" : "DEGRADED");
                return;
        }

        /*
         * vdev_id alias rule for using scsi_debug devices (FMA automated
         * testing)
         */
        if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
                is_sd = B_TRUE;

        /*
         * If the pool doesn't have the autoreplace property set, then use
         * vdev online to trigger a FMA fault by posting an ereport.
         */
        if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
            !(wholedisk || is_dm) || (physpath == NULL)) {
                (void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
                    &newstate);
                zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
                    "not a whole disk for '%s'", fullpath);
                return;
        }

        /*
         * Convert physical path into its current device node. Rawpath
         * needs to be /dev/disk/by-vdev for a scsi_debug device since
         * /dev/disk/by-path will not be present.
         */
        (void) snprintf(rawpath, sizeof (rawpath), "%s%s",
            is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);

        if (realpath(rawpath, devpath) == NULL && !is_dm) {
                zed_log_msg(LOG_INFO, " realpath: %s failed (%s)",
                    rawpath, strerror(errno));

                (void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
                    &newstate);

                zed_log_msg(LOG_INFO, " zpool_vdev_online: %s FORCEFAULT (%s)",
                    fullpath, libzfs_error_description(g_zfshdl));
                return;
        }

        /* Only autoreplace bad disks */
        if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
            (vs->vs_state != VDEV_STATE_FAULTED) &&
            (vs->vs_state != VDEV_STATE_CANT_OPEN)) {
                return;
        }

        nvlist_lookup_string(vdev, "new_devid", &new_devid);

        if (is_dm) {
                /* Don't label device mapper or multipath disks. */
        } else if (!labeled) {
                /*
                 * We're auto-replacing a raw disk, so label it first.
                 */
                char *leafname;

                /*
                 * If this is a request to label a whole disk, then attempt to
                 * write out the label. Before we can label the disk, we need
                 * to map the physical string that was matched on to the
                 * underlying device node.
                 *
                 * If any part of this process fails, then do a force online
                 * to trigger a ZFS fault for the device (and any hot spare
                 * replacement).
                 */
                leafname = strrchr(devpath, '/') + 1;

                if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
                        zed_log_msg(LOG_INFO, " zpool_label_disk: could not "
                            "label '%s' (%s)", leafname,
                            libzfs_error_description(g_zfshdl));

                        (void) zpool_vdev_online(zhp, fullpath,
                            ZFS_ONLINE_FORCEFAULT, &newstate);
                        return;
                }

                /*
                 * The disk labeling is asynchronous on Linux. Just record
                 * this label request and return as there will be another
                 * disk add event for the partition after the labeling is
                 * completed.
                 */
                device = malloc(sizeof (pendingdev_t));
                (void) strlcpy(device->pd_physpath, physpath,
                    sizeof (device->pd_physpath));
                list_insert_tail(&g_device_list, device);

                zed_log_msg(LOG_INFO, " zpool_label_disk: async '%s' (%llu)",
                    leafname, (u_longlong_t)guid);

                return; /* resumes at EC_DEV_ADD.ESC_DISK for partition */

        } else /* labeled */ {
                boolean_t found = B_FALSE;
                /*
                 * match up with request above to label the disk
                 */
                for (device = list_head(&g_device_list); device != NULL;
                    device = list_next(&g_device_list, device)) {
                        if (strcmp(physpath, device->pd_physpath) == 0) {
                                list_remove(&g_device_list, device);
                                free(device);
                                found = B_TRUE;
                                break;
                        }
                        zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
                            physpath, device->pd_physpath);
                }
                if (!found) {
                        /* unexpected partition slice encountered */
                        zed_log_msg(LOG_INFO, "labeled disk %s unexpected here",
                            fullpath);
                        (void) zpool_vdev_online(zhp, fullpath,
                            ZFS_ONLINE_FORCEFAULT, &newstate);
                        return;
                }

                zed_log_msg(LOG_INFO, " zpool_label_disk: resume '%s' (%llu)",
                    physpath, (u_longlong_t)guid);

                (void) snprintf(devpath, sizeof (devpath), "%s%s",
                    DEV_BYID_PATH, new_devid);
        }

        /*
         * Construct the root vdev to pass to zpool_vdev_attach(). While adding
         * the entire vdev structure is harmless, we construct a reduced set of
         * path/physpath/wholedisk to keep it simple.
         */
        if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
                zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
                return;
        }
        if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
                zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
                nvlist_free(nvroot);
                return;
        }

        if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
            nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
            nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
            (physpath != NULL && nvlist_add_string(newvd,
            ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
            (enc_sysfs_path != NULL && nvlist_add_string(newvd,
            ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
            nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
            nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
            nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
            1) != 0) {
                zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
                nvlist_free(newvd);
                nvlist_free(nvroot);
                return;
        }

        nvlist_free(newvd);

        /*
         * Wait for udev to verify the links exist, then auto-replace
         * the leaf disk at same physical location.
         */
        if (zpool_label_disk_wait(path, 3000) != 0) {
                zed_log_msg(LOG_WARNING, "zfs_mod: expected replacement "
                    "disk %s is missing", path);
                nvlist_free(nvroot);
                return;
        }

        /*
         * Prefer sequential resilvering when supported (mirrors and dRAID),
         * otherwise fall back to a traditional healing resilver.
         */
        ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE, B_TRUE);
        if (ret != 0) {
                ret = zpool_vdev_attach(zhp, fullpath, path, nvroot,
                    B_TRUE, B_FALSE);
        }

        zed_log_msg(LOG_INFO, " zpool_vdev_replace: %s with %s (%s)",
            fullpath, path, (ret == 0) ? "no errors" :
            libzfs_error_description(g_zfshdl));

        nvlist_free(nvroot);
}

/*
 * Utility functions to find a vdev matching given criteria.
 */
typedef struct dev_data {
        const char *dd_compare;
        const char *dd_prop;
        zfs_process_func_t dd_func;
        boolean_t dd_found;
        boolean_t dd_islabeled;
        uint64_t dd_pool_guid;
        uint64_t dd_vdev_guid;
        const char *dd_new_devid;
} dev_data_t;

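/*
 * Recursively walk a vdev tree (including any spare and L2ARC children) and
 * invoke dd_func on the vdev matching the requested GUID or nvlist property;
 * once a match has been processed, the remaining vdevs are skipped.
 */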
static void
zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
{
        dev_data_t *dp = data;
        char *path = NULL;
        uint_t c, children;
        nvlist_t **child;

        /*
         * First iterate over any children.
         */
        if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        zfs_iter_vdev(zhp, child[c], data);
        }

        /*
         * Iterate over any spares and cache devices
         */
        if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_SPARES,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        zfs_iter_vdev(zhp, child[c], data);
        }
        if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_L2CACHE,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        zfs_iter_vdev(zhp, child[c], data);
        }

        /* once a vdev was matched and processed there is nothing left to do */
        if (dp->dd_found)
                return;

        /*
         * Match by GUID if available; otherwise fall back to devid or
         * physical path.
         */
        if (dp->dd_vdev_guid != 0) {
                uint64_t guid;

                if (nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID,
                    &guid) != 0 || guid != dp->dd_vdev_guid) {
                        return;
                }
                zed_log_msg(LOG_INFO, " zfs_iter_vdev: matched on %llu", guid);
                dp->dd_found = B_TRUE;

        } else if (dp->dd_compare != NULL) {
                /*
                 * NOTE: On Linux there is an event for the partition, so
                 * unlike illumos, substring matching is not required to
                 * accommodate the partition suffix. An exact match will be
                 * present in the dp->dd_compare value.
                 */
                if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
                    strcmp(dp->dd_compare, path) != 0)
                        return;

                zed_log_msg(LOG_INFO, " zfs_iter_vdev: matched %s on %s",
                    dp->dd_prop, path);
                dp->dd_found = B_TRUE;

                /* pass the new devid for use by replacing code */
                if (dp->dd_new_devid != NULL) {
                        (void) nvlist_add_string(nvl, "new_devid",
                            dp->dd_new_devid);
                }
        }

        (dp->dd_func)(zhp, nvl, dp->dd_islabeled);
}

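/*
 * Thread pool task: mount and share the datasets of a pool that has become
 * available, then release the pool handle.
 */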
static void
zfs_enable_ds(void *arg)
{
        unavailpool_t *pool = (unavailpool_t *)arg;

        (void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
        zpool_close(pool->uap_zhp);
        free(pool);
}

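/*
 * zpool_iter() callback: search this pool's vdev tree for a match and, once
 * pool enumeration has finished, dispatch dataset enablement for any pool
 * that was previously unavailable and has recovered.
 */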
static int
zfs_iter_pool(zpool_handle_t *zhp, void *data)
{
        nvlist_t *config, *nvl;
        dev_data_t *dp = data;
        uint64_t pool_guid;
        unavailpool_t *pool;

        zed_log_msg(LOG_INFO, "zfs_iter_pool: evaluating vdevs on %s (by %s)",
            zpool_get_name(zhp), dp->dd_vdev_guid ? "GUID" : dp->dd_prop);

        /*
         * For each vdev in this pool, look for a match to apply dd_func
         */
        if ((config = zpool_get_config(zhp, NULL)) != NULL) {
                if (dp->dd_pool_guid == 0 ||
                    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
                        (void) nvlist_lookup_nvlist(config,
                            ZPOOL_CONFIG_VDEV_TREE, &nvl);
                        zfs_iter_vdev(zhp, nvl, data);
                }
        }

        /*
         * if this pool was originally unavailable,
         * then enable its datasets asynchronously
         */
        if (g_enumeration_done) {
                for (pool = list_head(&g_pool_list); pool != NULL;
                    pool = list_next(&g_pool_list, pool)) {

                        if (strcmp(zpool_get_name(zhp),
                            zpool_get_name(pool->uap_zhp)))
                                continue;
                        if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
                                list_remove(&g_pool_list, pool);
                                (void) tpool_dispatch(g_tpool, zfs_enable_ds,
                                    pool);
                                break;
                        }
                }
        }

        zpool_close(zhp);
        return (dp->dd_found); /* cease iteration after a match */
}

/*
 * Given a physical device location, iterate over all
 * (pool, vdev) pairs which correspond to that location.
 */
static boolean_t
devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
    boolean_t is_slice)
{
        dev_data_t data = { 0 };

        data.dd_compare = physical;
        data.dd_func = func;
        data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
        data.dd_found = B_FALSE;
        data.dd_islabeled = is_slice;
        data.dd_new_devid = devid;      /* used by auto replace code */

        (void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

        return (data.dd_found);
}

/*
 * Given a device identifier, find any vdevs with a matching devid.
 * On Linux we can match devid directly which is always a whole disk.
 */
static boolean_t
devid_iter(const char *devid, zfs_process_func_t func, boolean_t is_slice)
{
        dev_data_t data = { 0 };

        data.dd_compare = devid;
        data.dd_func = func;
        data.dd_prop = ZPOOL_CONFIG_DEVID;
        data.dd_found = B_FALSE;
        data.dd_islabeled = is_slice;
        data.dd_new_devid = devid;

        (void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

        return (data.dd_found);
}

/*
 * Handle an EC_DEV_ADD.ESC_DISK event.
 *
 * illumos
 *    Expects: DEV_PHYS_PATH string in schema
 *    Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
 *
 *    path: '/dev/dsk/c0t1d0s0' (persistent)
 *    devid: 'id1,sd@SATA_____Hitachi_HDS72101______JP2940HZ3H74MC/a'
 *    phys_path: '/pci@0,0/pci103c,1609@11/disk@1,0:a'
 *
 * linux
 *    Provides: DEV_PHYS_PATH and DEV_IDENTIFIER strings in schema
 *    Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
 *
 *    path: '/dev/sdc1' (not persistent)
 *    devid: 'ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1'
 *    phys_path: 'pci-0000:04:00.0-sas-0x4433221106000000-lun-0'
 */
static int
zfs_deliver_add(nvlist_t *nvl, boolean_t is_lofi)
{
        char *devpath = NULL, *devid;
        boolean_t is_slice;

        /*
         * Expecting a devid string and an optional physical location
         */
        if (nvlist_lookup_string(nvl, DEV_IDENTIFIER, &devid) != 0)
                return (-1);

        (void) nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath);

        is_slice = (nvlist_lookup_boolean(nvl, DEV_IS_PART) == 0);

        zed_log_msg(LOG_INFO, "zfs_deliver_add: adding %s (%s) (is_slice %d)",
            devid, devpath ? devpath : "NULL", is_slice);

        /*
         * Iterate over all vdevs looking for a match in the following order:
         * 1. ZPOOL_CONFIG_DEVID (identifies the unique disk)
         * 2. ZPOOL_CONFIG_PHYS_PATH (identifies disk physical location).
         *
         * For disks, we only want to pay attention to vdevs marked as whole
         * disks or as multipath devices.
         */
        if (!devid_iter(devid, zfs_process_add, is_slice) && devpath != NULL)
                (void) devphys_iter(devpath, devid, zfs_process_add, is_slice);

        return (0);
}

/*
 * Called when we receive a VDEV_CHECK event, which indicates a device could
 * not be opened during initial pool open, but the autoreplace property was
 * set on the pool. In this case, we treat it as if it were an add event.
 */
static int
zfs_deliver_check(nvlist_t *nvl)
{
        dev_data_t data = { 0 };

        if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
            &data.dd_pool_guid) != 0 ||
            nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
            &data.dd_vdev_guid) != 0 ||
            data.dd_vdev_guid == 0)
                return (0);

        zed_log_msg(LOG_INFO, "zfs_deliver_check: pool '%llu', vdev %llu",
            data.dd_pool_guid, data.dd_vdev_guid);

        data.dd_func = zfs_process_add;

        (void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

        return (0);
}

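/*
 * zpool_iter() callback for ESC_DEV_DLE events: look up the named device in
 * this pool and, for whole disks, reopen the pool so the kernel notices the
 * expanded size; if the pool's autoexpand property is set, also online the
 * device. Returns 1 once the device has been handled so iteration stops.
 */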
static int
zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
{
        char *devname = data;
        boolean_t avail_spare, l2cache;
        nvlist_t *tgt;
        int error;

        zed_log_msg(LOG_INFO, "zfsdle_vdev_online: searching for '%s' in '%s'",
            devname, zpool_get_name(zhp));

        if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
            &avail_spare, &l2cache, NULL)) != NULL) {
                char *path, fullpath[MAXPATHLEN];
                uint64_t wholedisk;

                error = nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &path);
                if (error) {
                        zpool_close(zhp);
                        return (0);
                }

                error = nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
                    &wholedisk);
                if (error)
                        wholedisk = 0;

                if (wholedisk) {
                        path = strrchr(path, '/');
                        if (path != NULL) {
                                path = zfs_strip_partition(path + 1);
                                if (path == NULL) {
                                        zpool_close(zhp);
                                        return (0);
                                }
                        } else {
                                zpool_close(zhp);
                                return (0);
                        }

                        (void) strlcpy(fullpath, path, sizeof (fullpath));
                        free(path);

                        /*
                         * We need to reopen the pool associated with this
                         * device so that the kernel can update the size of
                         * the expanded device. When expanding there is no
                         * need to restart the scrub from the beginning.
                         */
                        boolean_t scrub_restart = B_FALSE;
                        (void) zpool_reopen_one(zhp, &scrub_restart);
                } else {
                        (void) strlcpy(fullpath, path, sizeof (fullpath));
                }

                if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
                        vdev_state_t newstate;

                        if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
                                error = zpool_vdev_online(zhp, fullpath, 0,
                                    &newstate);
                                zed_log_msg(LOG_INFO, "zfsdle_vdev_online: "
                                    "setting device '%s' to ONLINE state "
                                    "in pool '%s': %d", fullpath,
                                    zpool_get_name(zhp), error);
                        }
                }
                zpool_close(zhp);
                return (1);
        }
        zpool_close(zhp);
        return (0);
}

/*
 * This function handles the ESC_DEV_DLE device change event. Use the
 * provided vdev guid when looking up a disk or partition; when the guid
 * is not present, assume the entire disk is owned by ZFS, append the
 * expected -part1 partition suffix, and look the device up by physical
 * path.
 */
static int
zfs_deliver_dle(nvlist_t *nvl)
{
        char *devname, name[MAXPATHLEN];
        uint64_t guid;

        if (nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
                sprintf(name, "%llu", (u_longlong_t)guid);
        } else if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) == 0) {
                strlcpy(name, devname, MAXPATHLEN);
                zfs_append_partition(name, MAXPATHLEN);
        } else {
                zed_log_msg(LOG_INFO, "zfs_deliver_dle: no guid or physpath");
                /* nothing to look up; bail before using the empty name */
                return (-1);
        }

        if (zpool_iter(g_zfshdl, zfsdle_vdev_online, name) != 1) {
                zed_log_msg(LOG_INFO, "zfs_deliver_dle: device '%s' not "
                    "found", name);
                return (1);
        }

        return (0);
}

/*
 * syseventd daemon module event handler
 *
 * Handles syseventd daemon zfs device related events:
 *
 *    EC_DEV_ADD.ESC_DISK
 *    EC_DEV_STATUS.ESC_DEV_DLE
 *    EC_ZFS.ESC_ZFS_VDEV_CHECK
 *
 * Note: assumes only one thread active at a time (not thread safe)
 */
static int
zfs_slm_deliver_event(const char *class, const char *subclass, nvlist_t *nvl)
{
        int ret;
        boolean_t is_lofi = B_FALSE, is_check = B_FALSE, is_dle = B_FALSE;

        if (strcmp(class, EC_DEV_ADD) == 0) {
                /*
                 * We're mainly interested in disk additions, but we also listen
                 * for new loop devices, to allow for simplified testing.
                 */
                if (strcmp(subclass, ESC_DISK) == 0)
                        is_lofi = B_FALSE;
                else if (strcmp(subclass, ESC_LOFI) == 0)
                        is_lofi = B_TRUE;
                else
                        return (0);

                is_check = B_FALSE;
        } else if (strcmp(class, EC_ZFS) == 0 &&
            strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
                /*
                 * This event signifies that a device failed to open
                 * during pool load, but the 'autoreplace' property was
                 * set, so we should pretend it's just been added.
                 */
                is_check = B_TRUE;
        } else if (strcmp(class, EC_DEV_STATUS) == 0 &&
            strcmp(subclass, ESC_DEV_DLE) == 0) {
                is_dle = B_TRUE;
        } else {
                return (0);
        }

        if (is_dle)
                ret = zfs_deliver_dle(nvl);
        else if (is_check)
                ret = zfs_deliver_check(nvl);
        else
                ret = zfs_deliver_add(nvl, is_lofi);

        return (ret);
}

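/*
 * Startup thread: enumerate the pools that were unavailable when zed started
 * so their datasets can be enabled once the missing devices reappear.
 */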
/*ARGSUSED*/
static void *
zfs_enum_pools(void *arg)
{
        (void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
        /*
         * Linux - instead of using a thread pool, each list entry
         * will spawn a thread when an unavailable pool transitions
         * to available. zfs_slm_fini will wait for these threads.
         */
        g_enumeration_done = B_TRUE;
        return (NULL);
}

/*
 * Called from the zed daemon at startup.
 *
 * Messages are delivered to this module from zevents or the udev monitor.
 *
 * For now, each agent has its own libzfs instance.
 */
int
zfs_slm_init()
{
        if ((g_zfshdl = libzfs_init()) == NULL)
                return (-1);

        /*
         * collect a list of unavailable pools (asynchronously,
         * since this can take a while)
         */
        list_create(&g_pool_list, sizeof (struct unavailpool),
            offsetof(struct unavailpool, uap_node));

        if (pthread_create(&g_zfs_tid, NULL, zfs_enum_pools, NULL) != 0) {
                list_destroy(&g_pool_list);
                libzfs_fini(g_zfshdl);
                return (-1);
        }

        list_create(&g_device_list, sizeof (struct pendingdev),
            offsetof(struct pendingdev, pd_node));

        return (0);
}

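/*
 * Called from the zed daemon at shutdown: wait for outstanding threads,
 * release any pools and pending devices still queued, and tear down the
 * libzfs handle.
 */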
void
zfs_slm_fini()
{
        unavailpool_t *pool;
        pendingdev_t *device;

        /* wait for zfs_enum_pools thread to complete */
        (void) pthread_join(g_zfs_tid, NULL);
        /* destroy the thread pool */
        if (g_tpool != NULL) {
                tpool_wait(g_tpool);
                tpool_destroy(g_tpool);
        }

        while ((pool = (list_head(&g_pool_list))) != NULL) {
                list_remove(&g_pool_list, pool);
                zpool_close(pool->uap_zhp);
                free(pool);
        }
        list_destroy(&g_pool_list);

        while ((device = (list_head(&g_device_list))) != NULL) {
                list_remove(&g_device_list, device);
                free(device);
        }
        list_destroy(&g_device_list);

        libzfs_fini(g_zfshdl);
}

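/*
 * Entry point for each event received by zed: log it and hand it off to the
 * event handler above.
 */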
void
zfs_slm_event(const char *class, const char *subclass, nvlist_t *nvl)
{
        zed_log_msg(LOG_INFO, "zfs_slm_event: %s.%s", class, subclass);
        (void) zfs_slm_deliver_event(class, subclass, nvl);
}
