1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or https://opensource.org/licenses/CDDL-1.0.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright (c) 2011, 2021 by Delphix. All rights reserved.
25  * Copyright 2017 Nexenta Systems, Inc.
26  * Copyright (c) 2014 Integros [integros.com]
27  * Copyright 2016 Toomas Soome <[email protected]>
28  * Copyright 2017 Joyent, Inc.
29  * Copyright (c) 2017, Intel Corporation.
30  * Copyright (c) 2019, Datto Inc. All rights reserved.
31  * Copyright (c) 2021, Klara Inc.
32  * Copyright (c) 2021, 2023 Hewlett Packard Enterprise Development LP.
33  */
34 
35 #include <sys/zfs_context.h>
36 #include <sys/fm/fs/zfs.h>
37 #include <sys/spa.h>
38 #include <sys/spa_impl.h>
39 #include <sys/bpobj.h>
40 #include <sys/dmu.h>
41 #include <sys/dmu_tx.h>
42 #include <sys/dsl_dir.h>
43 #include <sys/vdev_impl.h>
44 #include <sys/vdev_rebuild.h>
45 #include <sys/vdev_draid.h>
46 #include <sys/uberblock_impl.h>
47 #include <sys/metaslab.h>
48 #include <sys/metaslab_impl.h>
49 #include <sys/space_map.h>
50 #include <sys/space_reftree.h>
51 #include <sys/zio.h>
52 #include <sys/zap.h>
53 #include <sys/fs/zfs.h>
54 #include <sys/arc.h>
55 #include <sys/zil.h>
56 #include <sys/dsl_scan.h>
57 #include <sys/vdev_raidz.h>
58 #include <sys/abd.h>
59 #include <sys/vdev_initialize.h>
60 #include <sys/vdev_trim.h>
61 #include <sys/zvol.h>
62 #include <sys/zfs_ratelimit.h>
63 #include "zfs_prop.h"
64 
65 /*
66  * One metaslab from each (normal-class) vdev is used by the ZIL.  These are
67  * called "embedded slog metaslabs", are referenced by vdev_log_mg, and are
68  * part of the spa_embedded_log_class.  The metaslab with the most free space
69  * in each vdev is selected for this purpose when the pool is opened (or a
70  * vdev is added).  See vdev_metaslab_init().
71  *
72  * Log blocks can be allocated from the following locations.  Each one is tried
73  * in order until the allocation succeeds:
74  * 1. dedicated log vdevs, aka "slog" (spa_log_class)
75  * 2. embedded slog metaslabs (spa_embedded_log_class)
76  * 3. other metaslabs in normal vdevs (spa_normal_class)
77  *
78  * zfs_embedded_slog_min_ms disables the embedded slog if there are fewer
79  * than this number of metaslabs in the vdev.  This ensures that we don't set
80  * aside an unreasonable amount of space for the ZIL.  If set to less than
81  * 1 << (spa_slop_shift + 1), on small pools the usable space may be reduced
82  * (by more than 1<<spa_slop_shift) due to the embedded slog metaslab.
83  */
84 static uint_t zfs_embedded_slog_min_ms = 64;
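
/*
 * Illustrative note (not part of the original comment): with the default of
 * 64, a top-level vdev with only 40 metaslabs keeps all of them in the
 * normal class, while a vdev with 200 metaslabs dedicates exactly one of
 * them (the emptiest) to spa_embedded_log_class; see vdev_metaslab_init().
 */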
85 
86 /* default target for number of metaslabs per top-level vdev */
87 static uint_t zfs_vdev_default_ms_count = 200;
88 
89 /* minimum number of metaslabs per top-level vdev */
90 static uint_t zfs_vdev_min_ms_count = 16;
91 
92 /* practical upper limit of total metaslabs per top-level vdev */
93 static uint_t zfs_vdev_ms_count_limit = 1ULL << 17;
94 
95 /* lower limit for metaslab size (512M) */
96 static uint_t zfs_vdev_default_ms_shift = 29;
97 
98 /* upper limit for metaslab size (16G) */
99 static uint_t zfs_vdev_max_ms_shift = 34;
100 
101 int vdev_validate_skip = B_FALSE;
102 
103 /*
104  * Since the DTL space map of a vdev is not expected to have a lot of
105  * entries, we default its block size to 4K.
106  */
107 int zfs_vdev_dtl_sm_blksz = (1 << 12);
108 
109 /*
110  * Rate limit slow IO (delay) events to this many per second.
111  */
112 static unsigned int zfs_slow_io_events_per_second = 20;
113 
114 /*
115  * Rate limit checksum events after this many checksum errors per second.
116  */
117 static unsigned int zfs_checksum_events_per_second = 20;
118 
119 /*
120  * Ignore errors during scrub/resilver.  This allows a resilver that is
121  * triggered upon import to work around pool errors.
122  */
123 static int zfs_scan_ignore_errors = 0;
124 
125 /*
126  * vdev-wide space maps that have lots of entries written to them at
127  * the end of each transaction can benefit from a higher I/O bandwidth
128  * (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
129  */
130 int zfs_vdev_standard_sm_blksz = (1 << 17);
131 
132 /*
133  * Tunable parameter for debugging or performance analysis. Setting this
134  * will cause pool corruption on power loss if a volatile out-of-order
135  * write cache is enabled.
136  */
137 int zfs_nocacheflush = 0;
138 
139 /*
140  * Maximum and minimum ashift values that can be automatically set based on
141  * vdev's physical ashift (disk's physical sector size).  While ASHIFT_MAX
142  * is higher than the maximum value, it is intentionally limited here to not
143  * excessively impact pool space efficiency.  Higher ashift values may still
144  * be forced by the vdev's logical ashift or by the user via the ashift
145  * property, but won't be set automatically as a performance optimization.
146  */
147 uint_t zfs_vdev_max_auto_ashift = 14;
148 uint_t zfs_vdev_min_auto_ashift = ASHIFT_MIN;
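
/*
 * Illustrative mapping (ashift is the log2 of the allocation size):
 *	ashift  9 ->  512 B
 *	ashift 12 ->  4 KiB
 *	ashift 14 -> 16 KiB (the automatic ceiling set above)
 */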
149 
150 void
151 vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
152 {
153 	va_list adx;
154 	char buf[256];
155 
156 	va_start(adx, fmt);
157 	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
158 	va_end(adx);
159 
160 	if (vd->vdev_path != NULL) {
161 		zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
162 		    vd->vdev_path, buf);
163 	} else {
164 		zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
165 		    vd->vdev_ops->vdev_op_type,
166 		    (u_longlong_t)vd->vdev_id,
167 		    (u_longlong_t)vd->vdev_guid, buf);
168 	}
169 }
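
/*
 * Example usage (illustrative; mirrors calls made later in this file, e.g.
 * from vdev_metaslab_init()):
 *
 *	vdev_dbgmsg(vd, "metaslab_init failed [error=%d]", error);
 *
 * The message is prefixed with the vdev type and path (or id/guid) before
 * being passed to zfs_dbgmsg().
 */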
170 
171 void
172 vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
173 {
174 	char state[20];
175 
176 	if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
177 		zfs_dbgmsg("%*svdev %llu: %s", indent, "",
178 		    (u_longlong_t)vd->vdev_id,
179 		    vd->vdev_ops->vdev_op_type);
180 		return;
181 	}
182 
183 	switch (vd->vdev_state) {
184 	case VDEV_STATE_UNKNOWN:
185 		(void) snprintf(state, sizeof (state), "unknown");
186 		break;
187 	case VDEV_STATE_CLOSED:
188 		(void) snprintf(state, sizeof (state), "closed");
189 		break;
190 	case VDEV_STATE_OFFLINE:
191 		(void) snprintf(state, sizeof (state), "offline");
192 		break;
193 	case VDEV_STATE_REMOVED:
194 		(void) snprintf(state, sizeof (state), "removed");
195 		break;
196 	case VDEV_STATE_CANT_OPEN:
197 		(void) snprintf(state, sizeof (state), "can't open");
198 		break;
199 	case VDEV_STATE_FAULTED:
200 		(void) snprintf(state, sizeof (state), "faulted");
201 		break;
202 	case VDEV_STATE_DEGRADED:
203 		(void) snprintf(state, sizeof (state), "degraded");
204 		break;
205 	case VDEV_STATE_HEALTHY:
206 		(void) snprintf(state, sizeof (state), "healthy");
207 		break;
208 	default:
209 		(void) snprintf(state, sizeof (state), "<state %u>",
210 		    (uint_t)vd->vdev_state);
211 	}
212 
213 	zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
214 	    "", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
215 	    vd->vdev_islog ? " (log)" : "",
216 	    (u_longlong_t)vd->vdev_guid,
217 	    vd->vdev_path ? vd->vdev_path : "N/A", state);
218 
219 	for (uint64_t i = 0; i < vd->vdev_children; i++)
220 		vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
221 }
222 
223 /*
224  * Virtual device management.
225  */
226 
227 static vdev_ops_t *const vdev_ops_table[] = {
228 	&vdev_root_ops,
229 	&vdev_raidz_ops,
230 	&vdev_draid_ops,
231 	&vdev_draid_spare_ops,
232 	&vdev_mirror_ops,
233 	&vdev_replacing_ops,
234 	&vdev_spare_ops,
235 	&vdev_disk_ops,
236 	&vdev_file_ops,
237 	&vdev_missing_ops,
238 	&vdev_hole_ops,
239 	&vdev_indirect_ops,
240 	NULL
241 };
242 
243 /*
244  * Given a vdev type, return the appropriate ops vector.
245  */
246 static vdev_ops_t *
247 vdev_getops(const char *type)
248 {
249 	vdev_ops_t *ops, *const *opspp;
250 
251 	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
252 		if (strcmp(ops->vdev_op_type, type) == 0)
253 			break;
254 
255 	return (ops);
256 }
257 
258 /*
259  * Given a vdev and a metaslab class, find which metaslab group we're
260  * interested in. All vdevs may belong to two different metaslab classes.
261  * Dedicated slog devices use only the primary metaslab group, rather than a
262  * separate log group. For embedded slogs, the vdev_log_mg will be non-NULL.
263  */
264 metaslab_group_t *
265 vdev_get_mg(vdev_t *vd, metaslab_class_t *mc)
266 {
267 	if (mc == spa_embedded_log_class(vd->vdev_spa) &&
268 	    vd->vdev_log_mg != NULL)
269 		return (vd->vdev_log_mg);
270 	else
271 		return (vd->vdev_mg);
272 }
273 
274 void
275 vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
276     range_seg64_t *physical_rs, range_seg64_t *remain_rs)
277 {
278 	(void) vd, (void) remain_rs;
279 
280 	physical_rs->rs_start = logical_rs->rs_start;
281 	physical_rs->rs_end = logical_rs->rs_end;
282 }
283 
284 /*
285  * Derive the enumerated allocation bias from string input.
286  * String origin is either the per-vdev zap or zpool(8).
287  */
288 static vdev_alloc_bias_t
289 vdev_derive_alloc_bias(const char *bias)
290 {
291 	vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
292 
293 	if (strcmp(bias, VDEV_ALLOC_BIAS_LOG) == 0)
294 		alloc_bias = VDEV_BIAS_LOG;
295 	else if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
296 		alloc_bias = VDEV_BIAS_SPECIAL;
297 	else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
298 		alloc_bias = VDEV_BIAS_DEDUP;
299 
300 	return (alloc_bias);
301 }
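
/*
 * Illustrative example (assuming the usual string values of the
 * VDEV_ALLOC_BIAS_* constants, e.g. "special"):
 *
 *	vdev_derive_alloc_bias("special") == VDEV_BIAS_SPECIAL
 *
 * Any unrecognized string falls through to VDEV_BIAS_NONE.
 */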
302 
303 /*
304  * Default asize function: return the MAX of psize with the asize of
305  * all children.  This is what's used by anything other than RAID-Z.
306  */
307 uint64_t
308 vdev_default_asize(vdev_t *vd, uint64_t psize)
309 {
310 	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
311 	uint64_t csize;
312 
313 	for (int c = 0; c < vd->vdev_children; c++) {
314 		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
315 		asize = MAX(asize, csize);
316 	}
317 
318 	return (asize);
319 }
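
/*
 * Worked example (illustrative): with a top-level ashift of 12 (4 KiB),
 * P2ROUNDUP(1000, 1ULL << 12) == 4096, so a 1000-byte psize yields a
 * 4 KiB asize before taking the MAX over any children.
 */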
320 
321 uint64_t
322 vdev_default_min_asize(vdev_t *vd)
323 {
324 	return (vd->vdev_min_asize);
325 }
326 
327 /*
328  * Get the minimum allocatable size. We define the allocatable size as
329  * the vdev's asize rounded to the nearest metaslab. This allows us to
330  * replace or attach devices which don't have the same physical size but
331  * can still satisfy the same number of allocations.
332  */
333 uint64_t
334 vdev_get_min_asize(vdev_t *vd)
335 {
336 	vdev_t *pvd = vd->vdev_parent;
337 
338 	/*
339 	 * If our parent is NULL (inactive spare or cache) or is the root,
340 	 * just return our own asize.
341 	 */
342 	if (pvd == NULL)
343 		return (vd->vdev_asize);
344 
345 	/*
346 	 * The top-level vdev just returns the allocatable size rounded
347 	 * to the nearest metaslab.
348 	 */
349 	if (vd == vd->vdev_top)
350 		return (P2ALIGN_TYPED(vd->vdev_asize, 1ULL << vd->vdev_ms_shift,
351 		    uint64_t));
352 
353 	return (pvd->vdev_ops->vdev_op_min_asize(pvd));
354 }
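
/*
 * Worked example (illustrative): with vdev_ms_shift == 29 (512 MiB
 * metaslabs), a top-level asize of 1100 MiB is aligned down by
 * P2ALIGN_TYPED() to 1024 MiB, i.e. exactly two metaslabs.
 */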
355 
356 void
357 vdev_set_min_asize(vdev_t *vd)
358 {
359 	vd->vdev_min_asize = vdev_get_min_asize(vd);
360 
361 	for (int c = 0; c < vd->vdev_children; c++)
362 		vdev_set_min_asize(vd->vdev_child[c]);
363 }
364 
365 /*
366  * Get the minimal allocation size for the top-level vdev.
367  */
368 uint64_t
369 vdev_get_min_alloc(vdev_t *vd)
370 {
371 	uint64_t min_alloc = 1ULL << vd->vdev_ashift;
372 
373 	if (vd->vdev_ops->vdev_op_min_alloc != NULL)
374 		min_alloc = vd->vdev_ops->vdev_op_min_alloc(vd);
375 
376 	return (min_alloc);
377 }
378 
379 /*
380  * Get the parity level for a top-level vdev.
381  */
382 uint64_t
383 vdev_get_nparity(vdev_t *vd)
384 {
385 	uint64_t nparity = 0;
386 
387 	if (vd->vdev_ops->vdev_op_nparity != NULL)
388 		nparity = vd->vdev_ops->vdev_op_nparity(vd);
389 
390 	return (nparity);
391 }
392 
393 static int
394 vdev_prop_get_int(vdev_t *vd, vdev_prop_t prop, uint64_t *value)
395 {
396 	spa_t *spa = vd->vdev_spa;
397 	objset_t *mos = spa->spa_meta_objset;
398 	uint64_t objid;
399 	int err;
400 
401 	if (vd->vdev_root_zap != 0) {
402 		objid = vd->vdev_root_zap;
403 	} else if (vd->vdev_top_zap != 0) {
404 		objid = vd->vdev_top_zap;
405 	} else if (vd->vdev_leaf_zap != 0) {
406 		objid = vd->vdev_leaf_zap;
407 	} else {
408 		return (EINVAL);
409 	}
410 
411 	err = zap_lookup(mos, objid, vdev_prop_to_name(prop),
412 	    sizeof (uint64_t), 1, value);
413 
414 	if (err == ENOENT)
415 		*value = vdev_prop_default_numeric(prop);
416 
417 	return (err);
418 }
419 
420 /*
421  * Get the number of data disks for a top-level vdev.
422  */
423 uint64_t
424 vdev_get_ndisks(vdev_t *vd)
425 {
426 	uint64_t ndisks = 1;
427 
428 	if (vd->vdev_ops->vdev_op_ndisks != NULL)
429 		ndisks = vd->vdev_ops->vdev_op_ndisks(vd);
430 
431 	return (ndisks);
432 }
433 
434 vdev_t *
435 vdev_lookup_top(spa_t *spa, uint64_t vdev)
436 {
437 	vdev_t *rvd = spa->spa_root_vdev;
438 
439 	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
440 
441 	if (vdev < rvd->vdev_children) {
442 		ASSERT(rvd->vdev_child[vdev] != NULL);
443 		return (rvd->vdev_child[vdev]);
444 	}
445 
446 	return (NULL);
447 }
448 
449 vdev_t *
450 vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
451 {
452 	vdev_t *mvd;
453 
454 	if (vd->vdev_guid == guid)
455 		return (vd);
456 
457 	for (int c = 0; c < vd->vdev_children; c++)
458 		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
459 		    NULL)
460 			return (mvd);
461 
462 	return (NULL);
463 }
464 
465 static int
466 vdev_count_leaves_impl(vdev_t *vd)
467 {
468 	int n = 0;
469 
470 	if (vd->vdev_ops->vdev_op_leaf)
471 		return (1);
472 
473 	for (int c = 0; c < vd->vdev_children; c++)
474 		n += vdev_count_leaves_impl(vd->vdev_child[c]);
475 
476 	return (n);
477 }
478 
479 int
480 vdev_count_leaves(spa_t *spa)
481 {
482 	int rc;
483 
484 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
485 	rc = vdev_count_leaves_impl(spa->spa_root_vdev);
486 	spa_config_exit(spa, SCL_VDEV, FTAG);
487 
488 	return (rc);
489 }
490 
491 void
492 vdev_add_child(vdev_t *pvd, vdev_t *cvd)
493 {
494 	size_t oldsize, newsize;
495 	uint64_t id = cvd->vdev_id;
496 	vdev_t **newchild;
497 
498 	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
499 	ASSERT(cvd->vdev_parent == NULL);
500 
501 	cvd->vdev_parent = pvd;
502 
503 	if (pvd == NULL)
504 		return;
505 
506 	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);
507 
508 	oldsize = pvd->vdev_children * sizeof (vdev_t *);
509 	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
510 	newsize = pvd->vdev_children * sizeof (vdev_t *);
511 
512 	newchild = kmem_alloc(newsize, KM_SLEEP);
513 	if (pvd->vdev_child != NULL) {
514 		memcpy(newchild, pvd->vdev_child, oldsize);
515 		kmem_free(pvd->vdev_child, oldsize);
516 	}
517 
518 	pvd->vdev_child = newchild;
519 	pvd->vdev_child[id] = cvd;
520 
521 	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd);
522 	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
523 
524 	/*
525 	 * Walk up all ancestors to update guid sum.
526 	 */
527 	for (; pvd != NULL; pvd = pvd->vdev_parent)
528 		pvd->vdev_guid_sum += cvd->vdev_guid_sum;
529 
530 	if (cvd->vdev_ops->vdev_op_leaf) {
531 		list_insert_head(&cvd->vdev_spa->spa_leaf_list, cvd);
532 		cvd->vdev_spa->spa_leaf_list_gen++;
533 	}
534 }
535 
536 void
537 vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
538 {
539 	int c;
540 	uint_t id = cvd->vdev_id;
541 
542 	ASSERT(cvd->vdev_parent == pvd);
543 
544 	if (pvd == NULL)
545 		return;
546 
547 	ASSERT(id < pvd->vdev_children);
548 	ASSERT(pvd->vdev_child[id] == cvd);
549 
550 	pvd->vdev_child[id] = NULL;
551 	cvd->vdev_parent = NULL;
552 
553 	for (c = 0; c < pvd->vdev_children; c++)
554 		if (pvd->vdev_child[c])
555 			break;
556 
557 	if (c == pvd->vdev_children) {
558 		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
559 		pvd->vdev_child = NULL;
560 		pvd->vdev_children = 0;
561 	}
562 
563 	if (cvd->vdev_ops->vdev_op_leaf) {
564 		spa_t *spa = cvd->vdev_spa;
565 		list_remove(&spa->spa_leaf_list, cvd);
566 		spa->spa_leaf_list_gen++;
567 	}
568 
569 	/*
570 	 * Walk up all ancestors to update guid sum.
571 	 */
572 	for (; pvd != NULL; pvd = pvd->vdev_parent)
573 		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
574 }
575 
576 /*
577  * Remove any holes in the child array.
578  */
579 void
580 vdev_compact_children(vdev_t *pvd)
581 {
582 	vdev_t **newchild, *cvd;
583 	int oldc = pvd->vdev_children;
584 	int newc;
585 
586 	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
587 
588 	if (oldc == 0)
589 		return;
590 
591 	for (int c = newc = 0; c < oldc; c++)
592 		if (pvd->vdev_child[c])
593 			newc++;
594 
595 	if (newc > 0) {
596 		newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);
597 
598 		for (int c = newc = 0; c < oldc; c++) {
599 			if ((cvd = pvd->vdev_child[c]) != NULL) {
600 				newchild[newc] = cvd;
601 				cvd->vdev_id = newc++;
602 			}
603 		}
604 	} else {
605 		newchild = NULL;
606 	}
607 
608 	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
609 	pvd->vdev_child = newchild;
610 	pvd->vdev_children = newc;
611 }
612 
613 /*
614  * Allocate and minimally initialize a vdev_t.
615  */
616 vdev_t *
617 vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
618 {
619 	vdev_t *vd;
620 	vdev_indirect_config_t *vic;
621 
622 	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
623 	vic = &vd->vdev_indirect_config;
624 
625 	if (spa->spa_root_vdev == NULL) {
626 		ASSERT(ops == &vdev_root_ops);
627 		spa->spa_root_vdev = vd;
628 		spa->spa_load_guid = spa_generate_guid(NULL);
629 	}
630 
631 	if (guid == 0 && ops != &vdev_hole_ops) {
632 		if (spa->spa_root_vdev == vd) {
633 			/*
634 			 * The root vdev's guid will also be the pool guid,
635 			 * which must be unique among all pools.
636 			 */
637 			guid = spa_generate_guid(NULL);
638 		} else {
639 			/*
640 			 * Any other vdev's guid must be unique within the pool.
641 			 */
642 			guid = spa_generate_guid(spa);
643 		}
644 		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
645 	}
646 
647 	vd->vdev_spa = spa;
648 	vd->vdev_id = id;
649 	vd->vdev_guid = guid;
650 	vd->vdev_guid_sum = guid;
651 	vd->vdev_ops = ops;
652 	vd->vdev_state = VDEV_STATE_CLOSED;
653 	vd->vdev_ishole = (ops == &vdev_hole_ops);
654 	vic->vic_prev_indirect_vdev = UINT64_MAX;
655 
656 	rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
657 	mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
658 	vd->vdev_obsolete_segments = range_tree_create(NULL, RANGE_SEG64, NULL,
659 	    0, 0);
660 
661 	/*
662 	 * Initialize rate limit structs for events.  We rate limit ZIO delay
663 	 * and checksum events so that we don't overwhelm ZED with thousands
664 	 * of events when a disk is acting up.
665 	 */
666 	zfs_ratelimit_init(&vd->vdev_delay_rl, &zfs_slow_io_events_per_second,
667 	    1);
668 	zfs_ratelimit_init(&vd->vdev_deadman_rl, &zfs_slow_io_events_per_second,
669 	    1);
670 	zfs_ratelimit_init(&vd->vdev_checksum_rl,
671 	    &zfs_checksum_events_per_second, 1);
672 
673 	/*
674 	 * Default Thresholds for tuning ZED
675 	 */
676 	vd->vdev_checksum_n = vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_N);
677 	vd->vdev_checksum_t = vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_T);
678 	vd->vdev_io_n = vdev_prop_default_numeric(VDEV_PROP_IO_N);
679 	vd->vdev_io_t = vdev_prop_default_numeric(VDEV_PROP_IO_T);
680 	vd->vdev_slow_io_n = vdev_prop_default_numeric(VDEV_PROP_SLOW_IO_N);
681 	vd->vdev_slow_io_t = vdev_prop_default_numeric(VDEV_PROP_SLOW_IO_T);
682 
683 	list_link_init(&vd->vdev_config_dirty_node);
684 	list_link_init(&vd->vdev_state_dirty_node);
685 	list_link_init(&vd->vdev_initialize_node);
686 	list_link_init(&vd->vdev_leaf_node);
687 	list_link_init(&vd->vdev_trim_node);
688 
689 	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
690 	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
691 	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
692 	mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL);
693 
694 	mutex_init(&vd->vdev_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
695 	mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL);
696 	cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL);
697 	cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL);
698 
699 	mutex_init(&vd->vdev_trim_lock, NULL, MUTEX_DEFAULT, NULL);
700 	mutex_init(&vd->vdev_autotrim_lock, NULL, MUTEX_DEFAULT, NULL);
701 	mutex_init(&vd->vdev_trim_io_lock, NULL, MUTEX_DEFAULT, NULL);
702 	cv_init(&vd->vdev_trim_cv, NULL, CV_DEFAULT, NULL);
703 	cv_init(&vd->vdev_autotrim_cv, NULL, CV_DEFAULT, NULL);
704 	cv_init(&vd->vdev_autotrim_kick_cv, NULL, CV_DEFAULT, NULL);
705 	cv_init(&vd->vdev_trim_io_cv, NULL, CV_DEFAULT, NULL);
706 
707 	mutex_init(&vd->vdev_rebuild_lock, NULL, MUTEX_DEFAULT, NULL);
708 	cv_init(&vd->vdev_rebuild_cv, NULL, CV_DEFAULT, NULL);
709 
710 	for (int t = 0; t < DTL_TYPES; t++) {
711 		vd->vdev_dtl[t] = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
712 		    0);
713 	}
714 
715 	txg_list_create(&vd->vdev_ms_list, spa,
716 	    offsetof(struct metaslab, ms_txg_node));
717 	txg_list_create(&vd->vdev_dtl_list, spa,
718 	    offsetof(struct vdev, vdev_dtl_node));
719 	vd->vdev_stat.vs_timestamp = gethrtime();
720 	vdev_queue_init(vd);
721 
722 	return (vd);
723 }
724 
725 /*
726  * Allocate a new vdev.  The 'alloctype' is used to control whether we are
727  * creating a new vdev or loading an existing one - the behavior is slightly
728  * different for each case.
729  */
730 int
731 vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
732     int alloctype)
733 {
734 	vdev_ops_t *ops;
735 	const char *type;
736 	uint64_t guid = 0, islog;
737 	vdev_t *vd;
738 	vdev_indirect_config_t *vic;
739 	const char *tmp = NULL;
740 	int rc;
741 	vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
742 	boolean_t top_level = (parent && !parent->vdev_parent);
743 
744 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
745 
746 	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
747 		return (SET_ERROR(EINVAL));
748 
749 	if ((ops = vdev_getops(type)) == NULL)
750 		return (SET_ERROR(EINVAL));
751 
752 	/*
753 	 * If this is a load, get the vdev guid from the nvlist.
754 	 * Otherwise, vdev_alloc_common() will generate one for us.
755 	 */
756 	if (alloctype == VDEV_ALLOC_LOAD) {
757 		uint64_t label_id;
758 
759 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
760 		    label_id != id)
761 			return (SET_ERROR(EINVAL));
762 
763 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
764 			return (SET_ERROR(EINVAL));
765 	} else if (alloctype == VDEV_ALLOC_SPARE) {
766 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
767 			return (SET_ERROR(EINVAL));
768 	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
769 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
770 			return (SET_ERROR(EINVAL));
771 	} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
772 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
773 			return (SET_ERROR(EINVAL));
774 	}
775 
776 	/*
777 	 * The first allocated vdev must be of type 'root'.
778 	 */
779 	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
780 		return (SET_ERROR(EINVAL));
781 
782 	/*
783 	 * Determine whether we're a log vdev.
784 	 */
785 	islog = 0;
786 	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
787 	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
788 		return (SET_ERROR(ENOTSUP));
789 
790 	if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
791 		return (SET_ERROR(ENOTSUP));
792 
793 	if (top_level && alloctype == VDEV_ALLOC_ADD) {
794 		const char *bias;
795 
796 		/*
797 		 * If creating a top-level vdev, check for allocation
798 		 * classes input.
799 		 */
800 		if (nvlist_lookup_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
801 		    &bias) == 0) {
802 			alloc_bias = vdev_derive_alloc_bias(bias);
803 
804 			/* spa_vdev_add() expects feature to be enabled */
805 			if (spa->spa_load_state != SPA_LOAD_CREATE &&
806 			    !spa_feature_is_enabled(spa,
807 			    SPA_FEATURE_ALLOCATION_CLASSES)) {
808 				return (SET_ERROR(ENOTSUP));
809 			}
810 		}
811 
812 		/* spa_vdev_add() expects feature to be enabled */
813 		if (ops == &vdev_draid_ops &&
814 		    spa->spa_load_state != SPA_LOAD_CREATE &&
815 		    !spa_feature_is_enabled(spa, SPA_FEATURE_DRAID)) {
816 			return (SET_ERROR(ENOTSUP));
817 		}
818 	}
819 
820 	/*
821 	 * Initialize the vdev specific data.  This is done before calling
822 	 * vdev_alloc_common() since it may fail and this simplifies the
823 	 * error reporting and cleanup code paths.
824 	 */
825 	void *tsd = NULL;
826 	if (ops->vdev_op_init != NULL) {
827 		rc = ops->vdev_op_init(spa, nv, &tsd);
828 		if (rc != 0) {
829 			return (rc);
830 		}
831 	}
832 
833 	vd = vdev_alloc_common(spa, id, guid, ops);
834 	vd->vdev_tsd = tsd;
835 	vd->vdev_islog = islog;
836 
837 	if (top_level && alloc_bias != VDEV_BIAS_NONE)
838 		vd->vdev_alloc_bias = alloc_bias;
839 
840 	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tmp) == 0)
841 		vd->vdev_path = spa_strdup(tmp);
842 
843 	/*
844 	 * ZPOOL_CONFIG_AUX_STATE = "external" means we previously forced a
845 	 * fault on a vdev and want it to persist across imports (like with
846 	 * zpool offline -f).
847 	 */
848 	rc = nvlist_lookup_string(nv, ZPOOL_CONFIG_AUX_STATE, &tmp);
849 	if (rc == 0 && tmp != NULL && strcmp(tmp, "external") == 0) {
850 		vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
851 		vd->vdev_faulted = 1;
852 		vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
853 	}
854 
855 	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &tmp) == 0)
856 		vd->vdev_devid = spa_strdup(tmp);
857 	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH, &tmp) == 0)
858 		vd->vdev_physpath = spa_strdup(tmp);
859 
860 	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
861 	    &tmp) == 0)
862 		vd->vdev_enc_sysfs_path = spa_strdup(tmp);
863 
864 	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &tmp) == 0)
865 		vd->vdev_fru = spa_strdup(tmp);
866 
867 	/*
868 	 * Set the whole_disk property.  If it's not specified, leave the value
869 	 * as -1.
870 	 */
871 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
872 	    &vd->vdev_wholedisk) != 0)
873 		vd->vdev_wholedisk = -1ULL;
874 
875 	vic = &vd->vdev_indirect_config;
876 
877 	ASSERT0(vic->vic_mapping_object);
878 	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
879 	    &vic->vic_mapping_object);
880 	ASSERT0(vic->vic_births_object);
881 	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
882 	    &vic->vic_births_object);
883 	ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
884 	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
885 	    &vic->vic_prev_indirect_vdev);
886 
887 	/*
888 	 * Look for the 'not present' flag.  This will only be set if the device
889 	 * was not present at the time of import.
890 	 */
891 	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
892 	    &vd->vdev_not_present);
893 
894 	/*
895 	 * Get the alignment requirement. Ignore pool ashift for vdev
896 	 * attach case.
897 	 */
898 	if (alloctype != VDEV_ALLOC_ATTACH) {
899 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT,
900 		    &vd->vdev_ashift);
901 	} else {
902 		vd->vdev_attaching = B_TRUE;
903 	}
904 
905 	/*
906 	 * Retrieve the vdev creation time.
907 	 */
908 	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
909 	    &vd->vdev_crtxg);
910 
911 	if (vd->vdev_ops == &vdev_root_ops &&
912 	    (alloctype == VDEV_ALLOC_LOAD ||
913 	    alloctype == VDEV_ALLOC_SPLIT ||
914 	    alloctype == VDEV_ALLOC_ROOTPOOL)) {
915 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_ROOT_ZAP,
916 		    &vd->vdev_root_zap);
917 	}
918 
919 	/*
920 	 * If we're a top-level vdev, try to load the allocation parameters.
921 	 */
922 	if (top_level &&
923 	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
924 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
925 		    &vd->vdev_ms_array);
926 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
927 		    &vd->vdev_ms_shift);
928 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
929 		    &vd->vdev_asize);
930 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NONALLOCATING,
931 		    &vd->vdev_noalloc);
932 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
933 		    &vd->vdev_removing);
934 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
935 		    &vd->vdev_top_zap);
936 	} else {
937 		ASSERT0(vd->vdev_top_zap);
938 	}
939 
940 	if (top_level && alloctype != VDEV_ALLOC_ATTACH) {
941 		ASSERT(alloctype == VDEV_ALLOC_LOAD ||
942 		    alloctype == VDEV_ALLOC_ADD ||
943 		    alloctype == VDEV_ALLOC_SPLIT ||
944 		    alloctype == VDEV_ALLOC_ROOTPOOL);
945 		/* Note: metaslab_group_create() is now deferred */
946 	}
947 
948 	if (vd->vdev_ops->vdev_op_leaf &&
949 	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
950 		(void) nvlist_lookup_uint64(nv,
951 		    ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
952 	} else {
953 		ASSERT0(vd->vdev_leaf_zap);
954 	}
955 
956 	/*
957 	 * If we're a leaf vdev, try to load the DTL object and other state.
958 	 */
959 
960 	if (vd->vdev_ops->vdev_op_leaf &&
961 	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
962 	    alloctype == VDEV_ALLOC_ROOTPOOL)) {
963 		if (alloctype == VDEV_ALLOC_LOAD) {
964 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
965 			    &vd->vdev_dtl_object);
966 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
967 			    &vd->vdev_unspare);
968 		}
969 
970 		if (alloctype == VDEV_ALLOC_ROOTPOOL) {
971 			uint64_t spare = 0;
972 
973 			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
974 			    &spare) == 0 && spare)
975 				spa_spare_add(vd);
976 		}
977 
978 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
979 		    &vd->vdev_offline);
980 
981 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
982 		    &vd->vdev_resilver_txg);
983 
984 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REBUILD_TXG,
985 		    &vd->vdev_rebuild_txg);
986 
987 		if (nvlist_exists(nv, ZPOOL_CONFIG_RESILVER_DEFER))
988 			vdev_defer_resilver(vd);
989 
990 		/*
991 		 * In general, when importing a pool we want to ignore the
992 		 * persistent fault state, as the diagnosis made on another
993 		 * system may not be valid in the current context.  The only
994 		 * exception is if we forced a vdev to a persistently faulted
995 		 * state with 'zpool offline -f'.  The persistent fault will
996 		 * remain across imports until cleared.
997 		 *
998 		 * Local vdevs will remain in the faulted state.
999 		 */
1000 		if (spa_load_state(spa) == SPA_LOAD_OPEN ||
1001 		    spa_load_state(spa) == SPA_LOAD_IMPORT) {
1002 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
1003 			    &vd->vdev_faulted);
1004 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
1005 			    &vd->vdev_degraded);
1006 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
1007 			    &vd->vdev_removed);
1008 
1009 			if (vd->vdev_faulted || vd->vdev_degraded) {
1010 				const char *aux;
1011 
1012 				vd->vdev_label_aux =
1013 				    VDEV_AUX_ERR_EXCEEDED;
1014 				if (nvlist_lookup_string(nv,
1015 				    ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
1016 				    strcmp(aux, "external") == 0)
1017 					vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
1018 				else
1019 					vd->vdev_faulted = 0ULL;
1020 			}
1021 		}
1022 	}
1023 
1024 	/*
1025 	 * Add ourselves to the parent's list of children.
1026 	 */
1027 	vdev_add_child(parent, vd);
1028 
1029 	*vdp = vd;
1030 
1031 	return (0);
1032 }
1033 
1034 void
1035 vdev_free(vdev_t *vd)
1036 {
1037 	spa_t *spa = vd->vdev_spa;
1038 
1039 	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
1040 	ASSERT3P(vd->vdev_trim_thread, ==, NULL);
1041 	ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
1042 	ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
1043 
1044 	/*
1045 	 * Scan queues are normally destroyed at the end of a scan. If the
1046 	 * queue exists here, that implies the vdev is being removed while
1047 	 * the scan is still running.
1048 	 */
1049 	if (vd->vdev_scan_io_queue != NULL) {
1050 		mutex_enter(&vd->vdev_scan_io_queue_lock);
1051 		dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue);
1052 		vd->vdev_scan_io_queue = NULL;
1053 		mutex_exit(&vd->vdev_scan_io_queue_lock);
1054 	}
1055 
1056 	/*
1057 	 * vdev_free() implies closing the vdev first.  This is simpler than
1058 	 * trying to ensure complicated semantics for all callers.
1059 	 */
1060 	vdev_close(vd);
1061 
1062 	ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
1063 	ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
1064 
1065 	/*
1066 	 * Free all children.
1067 	 */
1068 	for (int c = 0; c < vd->vdev_children; c++)
1069 		vdev_free(vd->vdev_child[c]);
1070 
1071 	ASSERT(vd->vdev_child == NULL);
1072 	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
1073 
1074 	if (vd->vdev_ops->vdev_op_fini != NULL)
1075 		vd->vdev_ops->vdev_op_fini(vd);
1076 
1077 	/*
1078 	 * Discard allocation state.
1079 	 */
1080 	if (vd->vdev_mg != NULL) {
1081 		vdev_metaslab_fini(vd);
1082 		metaslab_group_destroy(vd->vdev_mg);
1083 		vd->vdev_mg = NULL;
1084 	}
1085 	if (vd->vdev_log_mg != NULL) {
1086 		ASSERT0(vd->vdev_ms_count);
1087 		metaslab_group_destroy(vd->vdev_log_mg);
1088 		vd->vdev_log_mg = NULL;
1089 	}
1090 
1091 	ASSERT0(vd->vdev_stat.vs_space);
1092 	ASSERT0(vd->vdev_stat.vs_dspace);
1093 	ASSERT0(vd->vdev_stat.vs_alloc);
1094 
1095 	/*
1096 	 * Remove this vdev from its parent's child list.
1097 	 */
1098 	vdev_remove_child(vd->vdev_parent, vd);
1099 
1100 	ASSERT(vd->vdev_parent == NULL);
1101 	ASSERT(!list_link_active(&vd->vdev_leaf_node));
1102 
1103 	/*
1104 	 * Clean up vdev structure.
1105 	 */
1106 	vdev_queue_fini(vd);
1107 
1108 	if (vd->vdev_path)
1109 		spa_strfree(vd->vdev_path);
1110 	if (vd->vdev_devid)
1111 		spa_strfree(vd->vdev_devid);
1112 	if (vd->vdev_physpath)
1113 		spa_strfree(vd->vdev_physpath);
1114 
1115 	if (vd->vdev_enc_sysfs_path)
1116 		spa_strfree(vd->vdev_enc_sysfs_path);
1117 
1118 	if (vd->vdev_fru)
1119 		spa_strfree(vd->vdev_fru);
1120 
1121 	if (vd->vdev_isspare)
1122 		spa_spare_remove(vd);
1123 	if (vd->vdev_isl2cache)
1124 		spa_l2cache_remove(vd);
1125 
1126 	txg_list_destroy(&vd->vdev_ms_list);
1127 	txg_list_destroy(&vd->vdev_dtl_list);
1128 
1129 	mutex_enter(&vd->vdev_dtl_lock);
1130 	space_map_close(vd->vdev_dtl_sm);
1131 	for (int t = 0; t < DTL_TYPES; t++) {
1132 		range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
1133 		range_tree_destroy(vd->vdev_dtl[t]);
1134 	}
1135 	mutex_exit(&vd->vdev_dtl_lock);
1136 
1137 	EQUIV(vd->vdev_indirect_births != NULL,
1138 	    vd->vdev_indirect_mapping != NULL);
1139 	if (vd->vdev_indirect_births != NULL) {
1140 		vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
1141 		vdev_indirect_births_close(vd->vdev_indirect_births);
1142 	}
1143 
1144 	if (vd->vdev_obsolete_sm != NULL) {
1145 		ASSERT(vd->vdev_removing ||
1146 		    vd->vdev_ops == &vdev_indirect_ops);
1147 		space_map_close(vd->vdev_obsolete_sm);
1148 		vd->vdev_obsolete_sm = NULL;
1149 	}
1150 	range_tree_destroy(vd->vdev_obsolete_segments);
1151 	rw_destroy(&vd->vdev_indirect_rwlock);
1152 	mutex_destroy(&vd->vdev_obsolete_lock);
1153 
1154 	mutex_destroy(&vd->vdev_dtl_lock);
1155 	mutex_destroy(&vd->vdev_stat_lock);
1156 	mutex_destroy(&vd->vdev_probe_lock);
1157 	mutex_destroy(&vd->vdev_scan_io_queue_lock);
1158 
1159 	mutex_destroy(&vd->vdev_initialize_lock);
1160 	mutex_destroy(&vd->vdev_initialize_io_lock);
1161 	cv_destroy(&vd->vdev_initialize_io_cv);
1162 	cv_destroy(&vd->vdev_initialize_cv);
1163 
1164 	mutex_destroy(&vd->vdev_trim_lock);
1165 	mutex_destroy(&vd->vdev_autotrim_lock);
1166 	mutex_destroy(&vd->vdev_trim_io_lock);
1167 	cv_destroy(&vd->vdev_trim_cv);
1168 	cv_destroy(&vd->vdev_autotrim_cv);
1169 	cv_destroy(&vd->vdev_autotrim_kick_cv);
1170 	cv_destroy(&vd->vdev_trim_io_cv);
1171 
1172 	mutex_destroy(&vd->vdev_rebuild_lock);
1173 	cv_destroy(&vd->vdev_rebuild_cv);
1174 
1175 	zfs_ratelimit_fini(&vd->vdev_delay_rl);
1176 	zfs_ratelimit_fini(&vd->vdev_deadman_rl);
1177 	zfs_ratelimit_fini(&vd->vdev_checksum_rl);
1178 
1179 	if (vd == spa->spa_root_vdev)
1180 		spa->spa_root_vdev = NULL;
1181 
1182 	kmem_free(vd, sizeof (vdev_t));
1183 }
1184 
1185 /*
1186  * Transfer top-level vdev state from svd to tvd.
1187  */
1188 static void
1189 vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
1190 {
1191 	spa_t *spa = svd->vdev_spa;
1192 	metaslab_t *msp;
1193 	vdev_t *vd;
1194 	int t;
1195 
1196 	ASSERT(tvd == tvd->vdev_top);
1197 
1198 	tvd->vdev_ms_array = svd->vdev_ms_array;
1199 	tvd->vdev_ms_shift = svd->vdev_ms_shift;
1200 	tvd->vdev_ms_count = svd->vdev_ms_count;
1201 	tvd->vdev_top_zap = svd->vdev_top_zap;
1202 
1203 	svd->vdev_ms_array = 0;
1204 	svd->vdev_ms_shift = 0;
1205 	svd->vdev_ms_count = 0;
1206 	svd->vdev_top_zap = 0;
1207 
1208 	if (tvd->vdev_mg)
1209 		ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
1210 	if (tvd->vdev_log_mg)
1211 		ASSERT3P(tvd->vdev_log_mg, ==, svd->vdev_log_mg);
1212 	tvd->vdev_mg = svd->vdev_mg;
1213 	tvd->vdev_log_mg = svd->vdev_log_mg;
1214 	tvd->vdev_ms = svd->vdev_ms;
1215 
1216 	svd->vdev_mg = NULL;
1217 	svd->vdev_log_mg = NULL;
1218 	svd->vdev_ms = NULL;
1219 
1220 	if (tvd->vdev_mg != NULL)
1221 		tvd->vdev_mg->mg_vd = tvd;
1222 	if (tvd->vdev_log_mg != NULL)
1223 		tvd->vdev_log_mg->mg_vd = tvd;
1224 
1225 	tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm;
1226 	svd->vdev_checkpoint_sm = NULL;
1227 
1228 	tvd->vdev_alloc_bias = svd->vdev_alloc_bias;
1229 	svd->vdev_alloc_bias = VDEV_BIAS_NONE;
1230 
1231 	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
1232 	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
1233 	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;
1234 
1235 	svd->vdev_stat.vs_alloc = 0;
1236 	svd->vdev_stat.vs_space = 0;
1237 	svd->vdev_stat.vs_dspace = 0;
1238 
1239 	/*
1240 	 * State which may be set on a top-level vdev that's in the
1241 	 * process of being removed.
1242 	 */
1243 	ASSERT0(tvd->vdev_indirect_config.vic_births_object);
1244 	ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
1245 	ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
1246 	ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
1247 	ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
1248 	ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
1249 	ASSERT0(tvd->vdev_noalloc);
1250 	ASSERT0(tvd->vdev_removing);
1251 	ASSERT0(tvd->vdev_rebuilding);
1252 	tvd->vdev_noalloc = svd->vdev_noalloc;
1253 	tvd->vdev_removing = svd->vdev_removing;
1254 	tvd->vdev_rebuilding = svd->vdev_rebuilding;
1255 	tvd->vdev_rebuild_config = svd->vdev_rebuild_config;
1256 	tvd->vdev_indirect_config = svd->vdev_indirect_config;
1257 	tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping;
1258 	tvd->vdev_indirect_births = svd->vdev_indirect_births;
1259 	range_tree_swap(&svd->vdev_obsolete_segments,
1260 	    &tvd->vdev_obsolete_segments);
1261 	tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm;
1262 	svd->vdev_indirect_config.vic_mapping_object = 0;
1263 	svd->vdev_indirect_config.vic_births_object = 0;
1264 	svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL;
1265 	svd->vdev_indirect_mapping = NULL;
1266 	svd->vdev_indirect_births = NULL;
1267 	svd->vdev_obsolete_sm = NULL;
1268 	svd->vdev_noalloc = 0;
1269 	svd->vdev_removing = 0;
1270 	svd->vdev_rebuilding = 0;
1271 
1272 	for (t = 0; t < TXG_SIZE; t++) {
1273 		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
1274 			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
1275 		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
1276 			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
1277 		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
1278 			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
1279 	}
1280 
1281 	if (list_link_active(&svd->vdev_config_dirty_node)) {
1282 		vdev_config_clean(svd);
1283 		vdev_config_dirty(tvd);
1284 	}
1285 
1286 	if (list_link_active(&svd->vdev_state_dirty_node)) {
1287 		vdev_state_clean(svd);
1288 		vdev_state_dirty(tvd);
1289 	}
1290 
1291 	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
1292 	svd->vdev_deflate_ratio = 0;
1293 
1294 	tvd->vdev_islog = svd->vdev_islog;
1295 	svd->vdev_islog = 0;
1296 
1297 	dsl_scan_io_queue_vdev_xfer(svd, tvd);
1298 }
1299 
1300 static void
1301 vdev_top_update(vdev_t *tvd, vdev_t *vd)
1302 {
1303 	if (vd == NULL)
1304 		return;
1305 
1306 	vd->vdev_top = tvd;
1307 
1308 	for (int c = 0; c < vd->vdev_children; c++)
1309 		vdev_top_update(tvd, vd->vdev_child[c]);
1310 }
1311 
1312 /*
1313  * Add a mirror/replacing vdev above an existing vdev.  There is no need to
1314  * call .vdev_op_init() since mirror/replacing vdevs do not have private state.
1315  */
1316 vdev_t *
1317 vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
1318 {
1319 	spa_t *spa = cvd->vdev_spa;
1320 	vdev_t *pvd = cvd->vdev_parent;
1321 	vdev_t *mvd;
1322 
1323 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1324 
1325 	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);
1326 
1327 	mvd->vdev_asize = cvd->vdev_asize;
1328 	mvd->vdev_min_asize = cvd->vdev_min_asize;
1329 	mvd->vdev_max_asize = cvd->vdev_max_asize;
1330 	mvd->vdev_psize = cvd->vdev_psize;
1331 	mvd->vdev_ashift = cvd->vdev_ashift;
1332 	mvd->vdev_logical_ashift = cvd->vdev_logical_ashift;
1333 	mvd->vdev_physical_ashift = cvd->vdev_physical_ashift;
1334 	mvd->vdev_state = cvd->vdev_state;
1335 	mvd->vdev_crtxg = cvd->vdev_crtxg;
1336 
1337 	vdev_remove_child(pvd, cvd);
1338 	vdev_add_child(pvd, mvd);
1339 	cvd->vdev_id = mvd->vdev_children;
1340 	vdev_add_child(mvd, cvd);
1341 	vdev_top_update(cvd->vdev_top, cvd->vdev_top);
1342 
1343 	if (mvd == mvd->vdev_top)
1344 		vdev_top_transfer(cvd, mvd);
1345 
1346 	return (mvd);
1347 }
1348 
1349 /*
1350  * Remove a 1-way mirror/replacing vdev from the tree.
1351  */
1352 void
1353 vdev_remove_parent(vdev_t *cvd)
1354 {
1355 	vdev_t *mvd = cvd->vdev_parent;
1356 	vdev_t *pvd = mvd->vdev_parent;
1357 
1358 	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1359 
1360 	ASSERT(mvd->vdev_children == 1);
1361 	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
1362 	    mvd->vdev_ops == &vdev_replacing_ops ||
1363 	    mvd->vdev_ops == &vdev_spare_ops);
1364 	cvd->vdev_ashift = mvd->vdev_ashift;
1365 	cvd->vdev_logical_ashift = mvd->vdev_logical_ashift;
1366 	cvd->vdev_physical_ashift = mvd->vdev_physical_ashift;
1367 	vdev_remove_child(mvd, cvd);
1368 	vdev_remove_child(pvd, mvd);
1369 
1370 	/*
1371 	 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
1372 	 * Otherwise, we could have detached an offline device, and when we
1373 	 * go to import the pool we'll think we have two top-level vdevs,
1374 	 * instead of a different version of the same top-level vdev.
1375 	 */
1376 	if (mvd->vdev_top == mvd) {
1377 		uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
1378 		cvd->vdev_orig_guid = cvd->vdev_guid;
1379 		cvd->vdev_guid += guid_delta;
1380 		cvd->vdev_guid_sum += guid_delta;
1381 
1382 		/*
1383 		 * If pool not set for autoexpand, we need to also preserve
1384 		 * mvd's asize to prevent automatic expansion of cvd.
1385 		 * Otherwise if we are adjusting the mirror by attaching and
1386 		 * detaching children of non-uniform sizes, the mirror could
1387 		 * autoexpand, unexpectedly requiring larger devices to
1388 		 * re-establish the mirror.
1389 		 */
1390 		if (!cvd->vdev_spa->spa_autoexpand)
1391 			cvd->vdev_asize = mvd->vdev_asize;
1392 	}
1393 	cvd->vdev_id = mvd->vdev_id;
1394 	vdev_add_child(pvd, cvd);
1395 	vdev_top_update(cvd->vdev_top, cvd->vdev_top);
1396 
1397 	if (cvd == cvd->vdev_top)
1398 		vdev_top_transfer(mvd, cvd);
1399 
1400 	ASSERT(mvd->vdev_children == 0);
1401 	vdev_free(mvd);
1402 }
1403 
1404 /*
1405  * Choose GCD for spa_gcd_alloc.
1406  */
1407 static uint64_t
1408 vdev_gcd(uint64_t a, uint64_t b)
1409 {
1410 	while (b != 0) {
1411 		uint64_t t = b;
1412 		b = a % b;
1413 		a = t;
1414 	}
1415 	return (a);
1416 }
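
/*
 * Worked example (illustrative): vdev_gcd(4096, 12288) iterates
 * (a, b): (4096, 12288) -> (12288, 4096) -> (4096, 0) and returns 4096.
 */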
1417 
1418 /*
1419  * Set spa_min_alloc and spa_gcd_alloc.
1420  */
1421 static void
1422 vdev_spa_set_alloc(spa_t *spa, uint64_t min_alloc)
1423 {
1424 	if (min_alloc < spa->spa_min_alloc)
1425 		spa->spa_min_alloc = min_alloc;
1426 	if (spa->spa_gcd_alloc == INT_MAX) {
1427 		spa->spa_gcd_alloc = min_alloc;
1428 	} else {
1429 		spa->spa_gcd_alloc = vdev_gcd(min_alloc,
1430 		    spa->spa_gcd_alloc);
1431 	}
1432 }
1433 
1434 void
1435 vdev_metaslab_group_create(vdev_t *vd)
1436 {
1437 	spa_t *spa = vd->vdev_spa;
1438 
1439 	/*
1440 	 * metaslab_group_create was delayed until allocation bias was available
1441 	 */
1442 	if (vd->vdev_mg == NULL) {
1443 		metaslab_class_t *mc;
1444 
1445 		if (vd->vdev_islog && vd->vdev_alloc_bias == VDEV_BIAS_NONE)
1446 			vd->vdev_alloc_bias = VDEV_BIAS_LOG;
1447 
1448 		ASSERT3U(vd->vdev_islog, ==,
1449 		    (vd->vdev_alloc_bias == VDEV_BIAS_LOG));
1450 
1451 		switch (vd->vdev_alloc_bias) {
1452 		case VDEV_BIAS_LOG:
1453 			mc = spa_log_class(spa);
1454 			break;
1455 		case VDEV_BIAS_SPECIAL:
1456 			mc = spa_special_class(spa);
1457 			break;
1458 		case VDEV_BIAS_DEDUP:
1459 			mc = spa_dedup_class(spa);
1460 			break;
1461 		default:
1462 			mc = spa_normal_class(spa);
1463 		}
1464 
1465 		vd->vdev_mg = metaslab_group_create(mc, vd,
1466 		    spa->spa_alloc_count);
1467 
1468 		if (!vd->vdev_islog) {
1469 			vd->vdev_log_mg = metaslab_group_create(
1470 			    spa_embedded_log_class(spa), vd, 1);
1471 		}
1472 
1473 		/*
1474 		 * The spa ashift min/max only apply for the normal metaslab
1475 		 * class. Class destination is late binding so ashift boundary
1476 		 * setting had to wait until now.
1477 		 */
1478 		if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
1479 		    mc == spa_normal_class(spa) && vd->vdev_aux == NULL) {
1480 			if (vd->vdev_ashift > spa->spa_max_ashift)
1481 				spa->spa_max_ashift = vd->vdev_ashift;
1482 			if (vd->vdev_ashift < spa->spa_min_ashift)
1483 				spa->spa_min_ashift = vd->vdev_ashift;
1484 
1485 			uint64_t min_alloc = vdev_get_min_alloc(vd);
1486 			vdev_spa_set_alloc(spa, min_alloc);
1487 		}
1488 	}
1489 }
1490 
1491 int
1492 vdev_metaslab_init(vdev_t *vd, uint64_t txg)
1493 {
1494 	spa_t *spa = vd->vdev_spa;
1495 	uint64_t oldc = vd->vdev_ms_count;
1496 	uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
1497 	metaslab_t **mspp;
1498 	int error;
1499 	boolean_t expanding = (oldc != 0);
1500 
1501 	ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1502 
1503 	/*
1504 	 * This vdev is not being allocated from yet or is a hole.
1505 	 */
1506 	if (vd->vdev_ms_shift == 0)
1507 		return (0);
1508 
1509 	ASSERT(!vd->vdev_ishole);
1510 
1511 	ASSERT(oldc <= newc);
1512 
1513 	mspp = vmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
1514 
1515 	if (expanding) {
1516 		memcpy(mspp, vd->vdev_ms, oldc * sizeof (*mspp));
1517 		vmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
1518 	}
1519 
1520 	vd->vdev_ms = mspp;
1521 	vd->vdev_ms_count = newc;
1522 
1523 	for (uint64_t m = oldc; m < newc; m++) {
1524 		uint64_t object = 0;
1525 		/*
1526 		 * vdev_ms_array may be 0 if we are creating the "fake"
1527 		 * metaslabs for an indirect vdev for zdb's leak detection.
1528 		 * See zdb_leak_init().
1529 		 */
1530 		if (txg == 0 && vd->vdev_ms_array != 0) {
1531 			error = dmu_read(spa->spa_meta_objset,
1532 			    vd->vdev_ms_array,
1533 			    m * sizeof (uint64_t), sizeof (uint64_t), &object,
1534 			    DMU_READ_PREFETCH);
1535 			if (error != 0) {
1536 				vdev_dbgmsg(vd, "unable to read the metaslab "
1537 				    "array [error=%d]", error);
1538 				return (error);
1539 			}
1540 		}
1541 
1542 		error = metaslab_init(vd->vdev_mg, m, object, txg,
1543 		    &(vd->vdev_ms[m]));
1544 		if (error != 0) {
1545 			vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
1546 			    error);
1547 			return (error);
1548 		}
1549 	}
1550 
1551 	/*
1552 	 * Find the emptiest metaslab on the vdev and mark it for use for
1553 	 * embedded slog by moving it from the regular to the log metaslab
1554 	 * group.
1555 	 */
1556 	if (vd->vdev_mg->mg_class == spa_normal_class(spa) &&
1557 	    vd->vdev_ms_count > zfs_embedded_slog_min_ms &&
1558 	    avl_is_empty(&vd->vdev_log_mg->mg_metaslab_tree)) {
1559 		uint64_t slog_msid = 0;
1560 		uint64_t smallest = UINT64_MAX;
1561 
1562 		/*
1563 		 * Note, we only search the new metaslabs, because the old
1564 		 * (pre-existing) ones may be active (e.g. have non-empty
1565 		 * range_tree's), and we don't move them to the new
1566 		 * metaslab_t.
1567 		 */
1568 		for (uint64_t m = oldc; m < newc; m++) {
1569 			uint64_t alloc =
1570 			    space_map_allocated(vd->vdev_ms[m]->ms_sm);
1571 			if (alloc < smallest) {
1572 				slog_msid = m;
1573 				smallest = alloc;
1574 			}
1575 		}
1576 		metaslab_t *slog_ms = vd->vdev_ms[slog_msid];
1577 		/*
1578 		 * The metaslab was marked as dirty at the end of
1579 		 * metaslab_init(). Remove it from the dirty list so that we
1580 		 * can uninitialize and reinitialize it to the new class.
1581 		 */
1582 		if (txg != 0) {
1583 			(void) txg_list_remove_this(&vd->vdev_ms_list,
1584 			    slog_ms, txg);
1585 		}
1586 		uint64_t sm_obj = space_map_object(slog_ms->ms_sm);
1587 		metaslab_fini(slog_ms);
1588 		VERIFY0(metaslab_init(vd->vdev_log_mg, slog_msid, sm_obj, txg,
1589 		    &vd->vdev_ms[slog_msid]));
1590 	}
1591 
1592 	if (txg == 0)
1593 		spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);
1594 
1595 	/*
1596 	 * If the vdev is marked as non-allocating then don't
1597 	 * activate the metaslabs since we want to ensure that
1598 	 * no allocations are performed on this device.
1599 	 */
1600 	if (vd->vdev_noalloc) {
1601 		/* track non-allocating vdev space */
1602 		spa->spa_nonallocating_dspace += spa_deflate(spa) ?
1603 		    vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
1604 	} else if (!expanding) {
1605 		metaslab_group_activate(vd->vdev_mg);
1606 		if (vd->vdev_log_mg != NULL)
1607 			metaslab_group_activate(vd->vdev_log_mg);
1608 	}
1609 
1610 	if (txg == 0)
1611 		spa_config_exit(spa, SCL_ALLOC, FTAG);
1612 
1613 	return (0);
1614 }
1615 
1616 void
1617 vdev_metaslab_fini(vdev_t *vd)
1618 {
1619 	if (vd->vdev_checkpoint_sm != NULL) {
1620 		ASSERT(spa_feature_is_active(vd->vdev_spa,
1621 		    SPA_FEATURE_POOL_CHECKPOINT));
1622 		space_map_close(vd->vdev_checkpoint_sm);
1623 		/*
1624 		 * Even though we close the space map, we need to set its
1625 		 * pointer to NULL. The reason is that vdev_metaslab_fini()
1626 		 * may be called multiple times for certain operations
1627 		 * (i.e. when destroying a pool) so we need to ensure that
1628 		 * this clause never executes twice. This logic is similar
1629 		 * to the one used for the vdev_ms clause below.
1630 		 */
1631 		vd->vdev_checkpoint_sm = NULL;
1632 	}
1633 
1634 	if (vd->vdev_ms != NULL) {
1635 		metaslab_group_t *mg = vd->vdev_mg;
1636 
1637 		metaslab_group_passivate(mg);
1638 		if (vd->vdev_log_mg != NULL) {
1639 			ASSERT(!vd->vdev_islog);
1640 			metaslab_group_passivate(vd->vdev_log_mg);
1641 		}
1642 
1643 		uint64_t count = vd->vdev_ms_count;
1644 		for (uint64_t m = 0; m < count; m++) {
1645 			metaslab_t *msp = vd->vdev_ms[m];
1646 			if (msp != NULL)
1647 				metaslab_fini(msp);
1648 		}
1649 		vmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
1650 		vd->vdev_ms = NULL;
1651 		vd->vdev_ms_count = 0;
1652 
1653 		for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
1654 			ASSERT0(mg->mg_histogram[i]);
1655 			if (vd->vdev_log_mg != NULL)
1656 				ASSERT0(vd->vdev_log_mg->mg_histogram[i]);
1657 		}
1658 	}
1659 	ASSERT0(vd->vdev_ms_count);
1660 }
1661 
1662 typedef struct vdev_probe_stats {
1663 	boolean_t	vps_readable;
1664 	boolean_t	vps_writeable;
1665 	boolean_t	vps_zio_done_probe;
1666 	int		vps_flags;
1667 } vdev_probe_stats_t;
1668 
1669 static void
1670 vdev_probe_done(zio_t *zio)
1671 {
1672 	spa_t *spa = zio->io_spa;
1673 	vdev_t *vd = zio->io_vd;
1674 	vdev_probe_stats_t *vps = zio->io_private;
1675 
1676 	ASSERT(vd->vdev_probe_zio != NULL);
1677 
1678 	if (zio->io_type == ZIO_TYPE_READ) {
1679 		if (zio->io_error == 0)
1680 			vps->vps_readable = 1;
1681 		if (zio->io_error == 0 && spa_writeable(spa)) {
1682 			zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
1683 			    zio->io_offset, zio->io_size, zio->io_abd,
1684 			    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
1685 			    ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
1686 		} else {
1687 			abd_free(zio->io_abd);
1688 		}
1689 	} else if (zio->io_type == ZIO_TYPE_WRITE) {
1690 		if (zio->io_error == 0)
1691 			vps->vps_writeable = 1;
1692 		abd_free(zio->io_abd);
1693 	} else if (zio->io_type == ZIO_TYPE_NULL) {
1694 		zio_t *pio;
1695 		zio_link_t *zl;
1696 
1697 		vd->vdev_cant_read |= !vps->vps_readable;
1698 		vd->vdev_cant_write |= !vps->vps_writeable;
1699 
1700 		if (vdev_readable(vd) &&
1701 		    (vdev_writeable(vd) || !spa_writeable(spa))) {
1702 			zio->io_error = 0;
1703 		} else {
1704 			ASSERT(zio->io_error != 0);
1705 			vdev_dbgmsg(vd, "failed probe");
1706 			(void) zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
1707 			    spa, vd, NULL, NULL, 0);
1708 			zio->io_error = SET_ERROR(ENXIO);
1709 
1710 			/*
1711 			 * If this probe was initiated from zio pipeline, then
1712 			 * change the state in a spa_async_request. Probes that
1713 			 * were initiated from a vdev_open can change the state
1714 			 * as part of the open call.
1715 			 */
1716 			if (vps->vps_zio_done_probe) {
1717 				vd->vdev_fault_wanted = B_TRUE;
1718 				spa_async_request(spa, SPA_ASYNC_FAULT_VDEV);
1719 			}
1720 		}
1721 
1722 		mutex_enter(&vd->vdev_probe_lock);
1723 		ASSERT(vd->vdev_probe_zio == zio);
1724 		vd->vdev_probe_zio = NULL;
1725 		mutex_exit(&vd->vdev_probe_lock);
1726 
1727 		zl = NULL;
1728 		while ((pio = zio_walk_parents(zio, &zl)) != NULL)
1729 			if (!vdev_accessible(vd, pio))
1730 				pio->io_error = SET_ERROR(ENXIO);
1731 
1732 		kmem_free(vps, sizeof (*vps));
1733 	}
1734 }
1735 
1736 /*
1737  * Determine whether this device is accessible.
1738  *
1739  * Read and write to several known locations: the pad regions of each
1740  * vdev label but the first, which we leave alone in case it contains
1741  * a VTOC.
1742  */
1743 zio_t *
1744 vdev_probe(vdev_t *vd, zio_t *zio)
1745 {
1746 	spa_t *spa = vd->vdev_spa;
1747 	vdev_probe_stats_t *vps = NULL;
1748 	zio_t *pio;
1749 
1750 	ASSERT(vd->vdev_ops->vdev_op_leaf);
1751 
1752 	/*
1753 	 * Don't probe the probe.
1754 	 */
1755 	if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
1756 		return (NULL);
1757 
1758 	/*
1759 	 * To prevent 'probe storms' when a device fails, we create
1760 	 * just one probe i/o at a time.  All zios that want to probe
1761 	 * this vdev will become parents of the probe io.
1762 	 */
1763 	mutex_enter(&vd->vdev_probe_lock);
1764 
1765 	if ((pio = vd->vdev_probe_zio) == NULL) {
1766 		vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
1767 
1768 		vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
1769 		    ZIO_FLAG_DONT_AGGREGATE | ZIO_FLAG_TRYHARD;
1770 		vps->vps_zio_done_probe = (zio != NULL);
1771 
1772 		if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
1773 			/*
1774 			 * vdev_cant_read and vdev_cant_write can only
1775 			 * transition from TRUE to FALSE when we have the
1776 			 * SCL_ZIO lock as writer; otherwise they can only
1777 			 * transition from FALSE to TRUE.  This ensures that
1778 			 * any zio looking at these values can assume that
1779 			 * failures persist for the life of the I/O.  That's
1780 			 * important because when a device has intermittent
1781 			 * connectivity problems, we want to ensure that
1782 			 * they're ascribed to the device (ENXIO) and not
1783 			 * the zio (EIO).
1784 			 *
1785 			 * Since we hold SCL_ZIO as writer here, clear both
1786 			 * values so the probe can reevaluate from first
1787 			 * principles.
1788 			 */
1789 			vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
1790 			vd->vdev_cant_read = B_FALSE;
1791 			vd->vdev_cant_write = B_FALSE;
1792 		}
1793 
1794 		vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
1795 		    vdev_probe_done, vps,
1796 		    vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);
1797 	}
1798 
1799 	if (zio != NULL)
1800 		zio_add_child(zio, pio);
1801 
1802 	mutex_exit(&vd->vdev_probe_lock);
1803 
1804 	if (vps == NULL) {
1805 		ASSERT(zio != NULL);
1806 		return (NULL);
1807 	}
1808 
1809 	for (int l = 1; l < VDEV_LABELS; l++) {
1810 		zio_nowait(zio_read_phys(pio, vd,
1811 		    vdev_label_offset(vd->vdev_psize, l,
1812 		    offsetof(vdev_label_t, vl_be)), VDEV_PAD_SIZE,
1813 		    abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
1814 		    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
1815 		    ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
1816 	}
1817 
1818 	if (zio == NULL)
1819 		return (pio);
1820 
1821 	zio_nowait(pio);
1822 	return (NULL);
1823 }
1824 
1825 static void
1826 vdev_load_child(void *arg)
1827 {
1828 	vdev_t *vd = arg;
1829 
1830 	vd->vdev_load_error = vdev_load(vd);
1831 }
1832 
1833 static void
1834 vdev_open_child(void *arg)
1835 {
1836 	vdev_t *vd = arg;
1837 
1838 	vd->vdev_open_thread = curthread;
1839 	vd->vdev_open_error = vdev_open(vd);
1840 	vd->vdev_open_thread = NULL;
1841 }
1842 
1843 static boolean_t
1844 vdev_uses_zvols(vdev_t *vd)
1845 {
1846 #ifdef _KERNEL
1847 	if (zvol_is_zvol(vd->vdev_path))
1848 		return (B_TRUE);
1849 #endif
1850 
1851 	for (int c = 0; c < vd->vdev_children; c++)
1852 		if (vdev_uses_zvols(vd->vdev_child[c]))
1853 			return (B_TRUE);
1854 
1855 	return (B_FALSE);
1856 }
1857 
1858 /*
1859  * Returns B_TRUE if the passed child should be opened.
1860  */
1861 static boolean_t
1862 vdev_default_open_children_func(vdev_t *vd)
1863 {
1864 	(void) vd;
1865 	return (B_TRUE);
1866 }
1867 
1868 /*
1869  * Open the requested child vdevs.  If any of the leaf vdevs are using
1870  * a ZFS volume then do the opens in a single thread.  This avoids a
1871  * deadlock when the current thread is holding the spa_namespace_lock.
1872  */
1873 static void
1874 vdev_open_children_impl(vdev_t *vd, vdev_open_children_func_t *open_func)
1875 {
1876 	int children = vd->vdev_children;
1877 
1878 	taskq_t *tq = taskq_create("vdev_open", children, minclsyspri,
1879 	    children, children, TASKQ_PREPOPULATE);
1880 	vd->vdev_nonrot = B_TRUE;
1881 
1882 	for (int c = 0; c < children; c++) {
1883 		vdev_t *cvd = vd->vdev_child[c];
1884 
1885 		if (open_func(cvd) == B_FALSE)
1886 			continue;
1887 
1888 		if (tq == NULL || vdev_uses_zvols(vd)) {
1889 			cvd->vdev_open_error = vdev_open(cvd);
1890 		} else {
1891 			VERIFY(taskq_dispatch(tq, vdev_open_child,
1892 			    cvd, TQ_SLEEP) != TASKQID_INVALID);
1893 		}
1894 
1895 		vd->vdev_nonrot &= cvd->vdev_nonrot;
1896 	}
1897 
1898 	if (tq != NULL) {
1899 		taskq_wait(tq);
1900 		taskq_destroy(tq);
1901 	}
1902 }
1903 
1904 /*
1905  * Open all child vdevs.
1906  */
1907 void
1908 vdev_open_children(vdev_t *vd)
1909 {
1910 	vdev_open_children_impl(vd, vdev_default_open_children_func);
1911 }
1912 
1913 /*
1914  * Conditionally open a subset of child vdevs.
1915  */
1916 void
1917 vdev_open_children_subset(vdev_t *vd, vdev_open_children_func_t *open_func)
1918 {
1919 	vdev_open_children_impl(vd, open_func);
1920 }
1921 
1922 /*
1923  * Compute the raidz-deflation ratio.  Note that we hard-code 128k
1924  * (1 << 17) because it is the "typical" blocksize.  Even though
1925  * SPA_MAXBLOCKSIZE has changed, this algorithm cannot change; otherwise
1926  * it would inconsistently account for existing bp's.
1927  */
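/*
 * As a rough illustration (SPA_MINBLOCKSHIFT == 9): for a plain disk where
 * vdev_psize_to_asize(vd, 1 << 17) returns 1 << 17, the ratio is
 * 131072 / 256 == 512.  A raidz vdev that allocated, say, 192k for a 128k
 * block would instead get 131072 / 384 == 341, reflecting its parity
 * overhead.
 */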
1928 static void
1929 vdev_set_deflate_ratio(vdev_t *vd)
1930 {
1931 	if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
1932 		vd->vdev_deflate_ratio = (1 << 17) /
1933 		    (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
1934 	}
1935 }
1936 
1937 /*
1938  * Choose the better of two ashifts, preferring one that falls between the
1939  * logical ashift (the absolute minimum) and the administrator-defined
1940  * maximum; otherwise take the bigger of the two.
1941  */
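/*
 * For example, with logical == 9 and zfs_vdev_max_auto_ashift == 14,
 * vdev_best_ashift(9, 12, 16) returns 12 (only 12 is inside the allowed
 * window), while vdev_best_ashift(9, 12, 13) returns 13 (both are
 * acceptable, so the larger wins).
 */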
1942 uint64_t
1943 vdev_best_ashift(uint64_t logical, uint64_t a, uint64_t b)
1944 {
1945 	if (a > logical && a <= zfs_vdev_max_auto_ashift) {
1946 		if (b <= logical || b > zfs_vdev_max_auto_ashift)
1947 			return (a);
1948 		else
1949 			return (MAX(a, b));
1950 	} else if (b <= logical || b > zfs_vdev_max_auto_ashift)
1951 		return (MAX(a, b));
1952 	return (b);
1953 }
1954 
1955 /*
1956  * Maximize performance by inflating the configured ashift for top level
1957  * vdevs to be as close to the physical ashift as possible while maintaining
1958  * administrator defined limits and ensuring it doesn't go below the
1959  * logical ashift.
1960  */
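/*
 * For example, assuming the default limits (zfs_vdev_min_auto_ashift == 9,
 * zfs_vdev_max_auto_ashift == 14), a top-level vdev opened with
 * vdev_ashift == 9 and vdev_physical_ashift == 12 has its ashift raised to
 * 12, while a device reporting a physical ashift above 14 is left at its
 * logical ashift.
 */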
1961 static void
1962 vdev_ashift_optimize(vdev_t *vd)
1963 {
1964 	ASSERT(vd == vd->vdev_top);
1965 
1966 	if (vd->vdev_ashift < vd->vdev_physical_ashift &&
1967 	    vd->vdev_physical_ashift <= zfs_vdev_max_auto_ashift) {
1968 		vd->vdev_ashift = MIN(
1969 		    MAX(zfs_vdev_max_auto_ashift, vd->vdev_ashift),
1970 		    MAX(zfs_vdev_min_auto_ashift,
1971 		    vd->vdev_physical_ashift));
1972 	} else {
1973 		/*
1974 		 * If the logical and physical ashifts are the same, then
1975 		 * we ensure that the top-level vdev's ashift is not smaller
1976 		 * than our minimum ashift value. For the unusual case
1977 		 * where logical ashift > physical ashift, we can't cap
1978 		 * the calculated ashift based on max ashift as that
1979 		 * would cause failures.
1980 		 * We still check if we need to increase it to match
1981 		 * the min ashift.
1982 		 */
1983 		vd->vdev_ashift = MAX(zfs_vdev_min_auto_ashift,
1984 		    vd->vdev_ashift);
1985 	}
1986 }
1987 
1988 /*
1989  * Prepare a virtual device for access.
1990  */
1991 int
1992 vdev_open(vdev_t *vd)
1993 {
1994 	spa_t *spa = vd->vdev_spa;
1995 	int error;
1996 	uint64_t osize = 0;
1997 	uint64_t max_osize = 0;
1998 	uint64_t asize, max_asize, psize;
1999 	uint64_t logical_ashift = 0;
2000 	uint64_t physical_ashift = 0;
2001 
2002 	ASSERT(vd->vdev_open_thread == curthread ||
2003 	    spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2004 	ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
2005 	    vd->vdev_state == VDEV_STATE_CANT_OPEN ||
2006 	    vd->vdev_state == VDEV_STATE_OFFLINE);
2007 
2008 	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
2009 	vd->vdev_cant_read = B_FALSE;
2010 	vd->vdev_cant_write = B_FALSE;
2011 	vd->vdev_fault_wanted = B_FALSE;
2012 	vd->vdev_min_asize = vdev_get_min_asize(vd);
2013 
2014 	/*
2015 	 * If this vdev is not removed, check its fault status.  If it's
2016 	 * faulted, bail out of the open.
2017 	 */
2018 	if (!vd->vdev_removed && vd->vdev_faulted) {
2019 		ASSERT(vd->vdev_children == 0);
2020 		ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
2021 		    vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
2022 		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
2023 		    vd->vdev_label_aux);
2024 		return (SET_ERROR(ENXIO));
2025 	} else if (vd->vdev_offline) {
2026 		ASSERT(vd->vdev_children == 0);
2027 		vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
2028 		return (SET_ERROR(ENXIO));
2029 	}
2030 
2031 	error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize,
2032 	    &logical_ashift, &physical_ashift);
2033 
2034 	/* Keep the device in removed state if unplugged */
2035 	if (error == ENOENT && vd->vdev_removed) {
2036 		vdev_set_state(vd, B_TRUE, VDEV_STATE_REMOVED,
2037 		    VDEV_AUX_NONE);
2038 		return (error);
2039 	}
2040 
2041 	/*
2042 	 * The physical volume size should never be larger than its max size
2043 	 * unless the disk has shrunk while we were reading it, or the device is
2044 	 * buggy or damaged.  Either way it is not safe for use; bail out of the open.
2045 	 */
2046 	if (osize > max_osize) {
2047 		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2048 		    VDEV_AUX_OPEN_FAILED);
2049 		return (SET_ERROR(ENXIO));
2050 	}
2051 
2052 	/*
2053 	 * Reset the vdev_reopening flag so that we actually close
2054 	 * the vdev on error.
2055 	 */
2056 	vd->vdev_reopening = B_FALSE;
2057 	if (zio_injection_enabled && error == 0)
2058 		error = zio_handle_device_injection(vd, NULL, SET_ERROR(ENXIO));
2059 
2060 	if (error) {
2061 		if (vd->vdev_removed &&
2062 		    vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
2063 			vd->vdev_removed = B_FALSE;
2064 
2065 		if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) {
2066 			vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE,
2067 			    vd->vdev_stat.vs_aux);
2068 		} else {
2069 			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2070 			    vd->vdev_stat.vs_aux);
2071 		}
2072 		return (error);
2073 	}
2074 
2075 	vd->vdev_removed = B_FALSE;
2076 
2077 	/*
2078 	 * Recheck the faulted flag now that we have confirmed that
2079 	 * the vdev is accessible.  If we're faulted, bail.
2080 	 */
2081 	if (vd->vdev_faulted) {
2082 		ASSERT(vd->vdev_children == 0);
2083 		ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
2084 		    vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
2085 		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
2086 		    vd->vdev_label_aux);
2087 		return (SET_ERROR(ENXIO));
2088 	}
2089 
2090 	if (vd->vdev_degraded) {
2091 		ASSERT(vd->vdev_children == 0);
2092 		vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
2093 		    VDEV_AUX_ERR_EXCEEDED);
2094 	} else {
2095 		vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
2096 	}
2097 
2098 	/*
2099 	 * For hole or missing vdevs we just return success.
2100 	 */
2101 	if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
2102 		return (0);
2103 
2104 	for (int c = 0; c < vd->vdev_children; c++) {
2105 		if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
2106 			vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
2107 			    VDEV_AUX_NONE);
2108 			break;
2109 		}
2110 	}
2111 
2112 	osize = P2ALIGN_TYPED(osize, sizeof (vdev_label_t), uint64_t);
2113 	max_osize = P2ALIGN_TYPED(max_osize, sizeof (vdev_label_t), uint64_t);
2114 
2115 	if (vd->vdev_children == 0) {
2116 		if (osize < SPA_MINDEVSIZE) {
2117 			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2118 			    VDEV_AUX_TOO_SMALL);
2119 			return (SET_ERROR(EOVERFLOW));
2120 		}
2121 		psize = osize;
2122 		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
2123 		max_asize = max_osize - (VDEV_LABEL_START_SIZE +
2124 		    VDEV_LABEL_END_SIZE);
2125 	} else {
2126 		if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
2127 		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
2128 			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2129 			    VDEV_AUX_TOO_SMALL);
2130 			return (SET_ERROR(EOVERFLOW));
2131 		}
2132 		psize = 0;
2133 		asize = osize;
2134 		max_asize = max_osize;
2135 	}
2136 
2137 	/*
2138 	 * If the vdev was expanded, record this so that we can re-create the
2139 	 * uberblock rings in labels {2,3}, during the next sync.
2140 	 */
2141 	if ((psize > vd->vdev_psize) && (vd->vdev_psize != 0))
2142 		vd->vdev_copy_uberblocks = B_TRUE;
2143 
2144 	vd->vdev_psize = psize;
2145 
2146 	/*
2147 	 * Make sure the allocatable size hasn't shrunk too much.
2148 	 */
2149 	if (asize < vd->vdev_min_asize) {
2150 		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2151 		    VDEV_AUX_BAD_LABEL);
2152 		return (SET_ERROR(EINVAL));
2153 	}
2154 
2155 	/*
2156 	 * We can always set the logical/physical ashift members since
2157 	 * their values are only used to calculate the vdev_ashift when
2158 	 * the device is first added to the config. These values should
2159 	 * not be used for anything else since they may change whenever
2160 	 * the device is reopened and we don't store them in the label.
2161 	 */
2162 	vd->vdev_physical_ashift =
2163 	    MAX(physical_ashift, vd->vdev_physical_ashift);
2164 	vd->vdev_logical_ashift = MAX(logical_ashift,
2165 	    vd->vdev_logical_ashift);
2166 
2167 	if (vd->vdev_asize == 0) {
2168 		/*
2169 		 * This is the first-ever open, so use the computed values.
2170 		 * For compatibility, a different ashift can be requested.
2171 		 */
2172 		vd->vdev_asize = asize;
2173 		vd->vdev_max_asize = max_asize;
2174 
2175 		/*
2176 		 * If the vdev_ashift was not overridden at creation time,
2177 		 * then set it to the logical ashift and optimize the ashift.
2178 		 */
2179 		if (vd->vdev_ashift == 0) {
2180 			vd->vdev_ashift = vd->vdev_logical_ashift;
2181 
2182 			if (vd->vdev_logical_ashift > ASHIFT_MAX) {
2183 				vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2184 				    VDEV_AUX_ASHIFT_TOO_BIG);
2185 				return (SET_ERROR(EDOM));
2186 			}
2187 
2188 			if (vd->vdev_top == vd && vd->vdev_attaching == B_FALSE)
2189 				vdev_ashift_optimize(vd);
2190 			vd->vdev_attaching = B_FALSE;
2191 		}
2192 		if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN ||
2193 		    vd->vdev_ashift > ASHIFT_MAX)) {
2194 			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2195 			    VDEV_AUX_BAD_ASHIFT);
2196 			return (SET_ERROR(EDOM));
2197 		}
2198 	} else {
2199 		/*
2200 		 * Make sure the alignment required hasn't increased.
2201 		 */
2202 		if (vd->vdev_ashift > vd->vdev_top->vdev_ashift &&
2203 		    vd->vdev_ops->vdev_op_leaf) {
2204 			(void) zfs_ereport_post(
2205 			    FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
2206 			    spa, vd, NULL, NULL, 0);
2207 			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2208 			    VDEV_AUX_BAD_LABEL);
2209 			return (SET_ERROR(EDOM));
2210 		}
2211 		vd->vdev_max_asize = max_asize;
2212 	}
2213 
2214 	/*
2215 	 * If all children are healthy we update asize if either:
2216 	 * The asize has increased, due to a device expansion caused by dynamic
2217 	 * LUN growth or vdev replacement, and automatic expansion is enabled;
2218 	 * making the additional space available.
2219 	 *
2220 	 * The asize has decreased, due to a device shrink usually caused by a
2221 	 * vdev replace with a smaller device. This ensures that calculations
2222 	 * based on max_asize and asize (e.g. esize) are always valid.  It's safe
2223 	 * to do this as we've already validated that asize is greater than
2224 	 * vdev_min_asize.
2225 	 */
2226 	if (vd->vdev_state == VDEV_STATE_HEALTHY &&
2227 	    ((asize > vd->vdev_asize &&
2228 	    (vd->vdev_expanding || spa->spa_autoexpand)) ||
2229 	    (asize < vd->vdev_asize)))
2230 		vd->vdev_asize = asize;
2231 
2232 	vdev_set_min_asize(vd);
2233 
2234 	/*
2235 	 * Ensure we can issue some IO before declaring the
2236 	 * vdev open for business.
2237 	 */
2238 	if (vd->vdev_ops->vdev_op_leaf &&
2239 	    (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
2240 		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
2241 		    VDEV_AUX_ERR_EXCEEDED);
2242 		return (error);
2243 	}
2244 
2245 	/*
2246 	 * Track the minimum allocation size.
2247 	 */
2248 	if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
2249 	    vd->vdev_islog == 0 && vd->vdev_aux == NULL) {
2250 		uint64_t min_alloc = vdev_get_min_alloc(vd);
2251 		vdev_spa_set_alloc(spa, min_alloc);
2252 	}
2253 
2254 	/*
2255 	 * If this is a leaf vdev, assess whether a resilver is needed.
2256 	 * But don't do this if we are doing a reopen for a scrub, since
2257 	 * this would just restart the scrub we are already doing.
2258 	 */
2259 	if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen)
2260 		dsl_scan_assess_vdev(spa->spa_dsl_pool, vd);
2261 
2262 	return (0);
2263 }
2264 
2265 static void
2266 vdev_validate_child(void *arg)
2267 {
2268 	vdev_t *vd = arg;
2269 
2270 	vd->vdev_validate_thread = curthread;
2271 	vd->vdev_validate_error = vdev_validate(vd);
2272 	vd->vdev_validate_thread = NULL;
2273 }
2274 
2275 /*
2276  * Called once the vdevs are all opened, this routine validates the label
2277  * contents. This needs to be done before vdev_load() so that we don't
2278  * inadvertently do repair I/Os to the wrong device.
2279  *
2280  * This function will only return failure if one of the vdevs indicates that it
2281  * has since been destroyed or exported.  This is only possible if
2282  * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
2283  * will be updated but the function will return 0.
2284  */
2285 int
2286 vdev_validate(vdev_t *vd)
2287 {
2288 	spa_t *spa = vd->vdev_spa;
2289 	taskq_t *tq = NULL;
2290 	nvlist_t *label;
2291 	uint64_t guid = 0, aux_guid = 0, top_guid;
2292 	uint64_t state;
2293 	nvlist_t *nvl;
2294 	uint64_t txg;
2295 	int children = vd->vdev_children;
2296 
2297 	if (vdev_validate_skip)
2298 		return (0);
2299 
2300 	if (children > 0) {
2301 		tq = taskq_create("vdev_validate", children, minclsyspri,
2302 		    children, children, TASKQ_PREPOPULATE);
2303 	}
2304 
2305 	for (uint64_t c = 0; c < children; c++) {
2306 		vdev_t *cvd = vd->vdev_child[c];
2307 
2308 		if (tq == NULL || vdev_uses_zvols(cvd)) {
2309 			vdev_validate_child(cvd);
2310 		} else {
2311 			VERIFY(taskq_dispatch(tq, vdev_validate_child, cvd,
2312 			    TQ_SLEEP) != TASKQID_INVALID);
2313 		}
2314 	}
2315 	if (tq != NULL) {
2316 		taskq_wait(tq);
2317 		taskq_destroy(tq);
2318 	}
2319 	for (int c = 0; c < children; c++) {
2320 		int error = vd->vdev_child[c]->vdev_validate_error;
2321 
2322 		if (error != 0)
2323 			return (SET_ERROR(EBADF));
2324 	}
2325 
2326 
2327 	/*
2328 	 * If the device has already failed, or was marked offline, don't do
2329 	 * any further validation.  Otherwise, label I/O will fail and we will
2330 	 * overwrite the previous state.
2331 	 */
2332 	if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd))
2333 		return (0);
2334 
2335 	/*
2336 	 * If we are performing an extreme rewind, we allow for a label that
2337 	 * was modified at a point after the current txg.
2338 	 * If config lock is not held do not check for the txg. spa_sync could
2339 	 * be updating the vdev's label before updating spa_last_synced_txg.
2340 	 */
2341 	if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 ||
2342 	    spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG)
2343 		txg = UINT64_MAX;
2344 	else
2345 		txg = spa_last_synced_txg(spa);
2346 
2347 	if ((label = vdev_label_read_config(vd, txg)) == NULL) {
2348 		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2349 		    VDEV_AUX_BAD_LABEL);
2350 		vdev_dbgmsg(vd, "vdev_validate: failed reading config for "
2351 		    "txg %llu", (u_longlong_t)txg);
2352 		return (0);
2353 	}
2354 
2355 	/*
2356 	 * Determine if this vdev has been split off into another
2357 	 * pool.  If so, then refuse to open it.
2358 	 */
2359 	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
2360 	    &aux_guid) == 0 && aux_guid == spa_guid(spa)) {
2361 		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2362 		    VDEV_AUX_SPLIT_POOL);
2363 		nvlist_free(label);
2364 		vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool");
2365 		return (0);
2366 	}
2367 
2368 	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) {
2369 		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2370 		    VDEV_AUX_CORRUPT_DATA);
2371 		nvlist_free(label);
2372 		vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
2373 		    ZPOOL_CONFIG_POOL_GUID);
2374 		return (0);
2375 	}
2376 
2377 	/*
2378 	 * If config is not trusted then ignore the spa guid check. This is
2379 	 * necessary because if the machine crashed during a re-guid the new
2380 	 * guid might have been written to all of the vdev labels, but not the
2381 	 * cached config. The check will be performed again once we have the
2382 	 * trusted config from the MOS.
2383 	 */
2384 	if (spa->spa_trust_config && guid != spa_guid(spa)) {
2385 		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2386 		    VDEV_AUX_CORRUPT_DATA);
2387 		nvlist_free(label);
2388 		vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't "
2389 		    "match config (%llu != %llu)", (u_longlong_t)guid,
2390 		    (u_longlong_t)spa_guid(spa));
2391 		return (0);
2392 	}
2393 
2394 	if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
2395 	    != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
2396 	    &aux_guid) != 0)
2397 		aux_guid = 0;
2398 
2399 	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) {
2400 		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2401 		    VDEV_AUX_CORRUPT_DATA);
2402 		nvlist_free(label);
2403 		vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
2404 		    ZPOOL_CONFIG_GUID);
2405 		return (0);
2406 	}
2407 
2408 	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid)
2409 	    != 0) {
2410 		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2411 		    VDEV_AUX_CORRUPT_DATA);
2412 		nvlist_free(label);
2413 		vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
2414 		    ZPOOL_CONFIG_TOP_GUID);
2415 		return (0);
2416 	}
2417 
2418 	/*
2419 	 * If this vdev just became a top-level vdev because its sibling was
2420 	 * detached, it will have adopted the parent's vdev guid -- but the
2421 	 * label may or may not be on disk yet. Fortunately, either version
2422 	 * of the label will have the same top guid, so if we're a top-level
2423 	 * vdev, we can safely compare to that instead.
2424 	 * However, if the config comes from a cachefile that failed to update
2425 	 * after the detach, a top-level vdev will appear as a non top-level
2426 	 * vdev in the config. Also relax the constraints if we perform an
2427 	 * extreme rewind.
2428 	 *
2429 	 * If we split this vdev off instead, then we also check the
2430 	 * original pool's guid. We don't want to consider the vdev
2431 	 * corrupt if it is partway through a split operation.
2432 	 */
2433 	if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) {
2434 		boolean_t mismatch = B_FALSE;
2435 		if (spa->spa_trust_config && !spa->spa_extreme_rewind) {
2436 			if (vd != vd->vdev_top || vd->vdev_guid != top_guid)
2437 				mismatch = B_TRUE;
2438 		} else {
2439 			if (vd->vdev_guid != top_guid &&
2440 			    vd->vdev_top->vdev_guid != guid)
2441 				mismatch = B_TRUE;
2442 		}
2443 
2444 		if (mismatch) {
2445 			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2446 			    VDEV_AUX_CORRUPT_DATA);
2447 			nvlist_free(label);
2448 			vdev_dbgmsg(vd, "vdev_validate: config guid "
2449 			    "doesn't match label guid");
2450 			vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu",
2451 			    (u_longlong_t)vd->vdev_guid,
2452 			    (u_longlong_t)vd->vdev_top->vdev_guid);
2453 			vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, "
2454 			    "aux_guid %llu", (u_longlong_t)guid,
2455 			    (u_longlong_t)top_guid, (u_longlong_t)aux_guid);
2456 			return (0);
2457 		}
2458 	}
2459 
2460 	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
2461 	    &state) != 0) {
2462 		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2463 		    VDEV_AUX_CORRUPT_DATA);
2464 		nvlist_free(label);
2465 		vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
2466 		    ZPOOL_CONFIG_POOL_STATE);
2467 		return (0);
2468 	}
2469 
2470 	nvlist_free(label);
2471 
2472 	/*
2473 	 * If this is a verbatim import, no need to check the
2474 	 * state of the pool.
2475 	 */
2476 	if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
2477 	    spa_load_state(spa) == SPA_LOAD_OPEN &&
2478 	    state != POOL_STATE_ACTIVE) {
2479 		vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) "
2480 		    "for spa %s", (u_longlong_t)state, spa->spa_name);
2481 		return (SET_ERROR(EBADF));
2482 	}
2483 
2484 	/*
2485 	 * If we were able to open and validate a vdev that was
2486 	 * previously marked permanently unavailable, clear that state
2487 	 * now.
2488 	 */
2489 	if (vd->vdev_not_present)
2490 		vd->vdev_not_present = 0;
2491 
2492 	return (0);
2493 }
2494 
2495 static void
2496 vdev_update_path(const char *prefix, char *svd, char **dvd, uint64_t guid)
2497 {
2498 	if (svd != NULL && *dvd != NULL) {
2499 		if (strcmp(svd, *dvd) != 0) {
2500 			zfs_dbgmsg("vdev_copy_path: vdev %llu: %s changed "
2501 			    "from '%s' to '%s'", (u_longlong_t)guid, prefix,
2502 			    *dvd, svd);
2503 			spa_strfree(*dvd);
2504 			*dvd = spa_strdup(svd);
2505 		}
2506 	} else if (svd != NULL) {
2507 		*dvd = spa_strdup(svd);
2508 		zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'",
2509 		    (u_longlong_t)guid, *dvd);
2510 	}
2511 }
2512 
2513 static void
2514 vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd)
2515 {
2516 	char *old, *new;
2517 
2518 	vdev_update_path("vdev_path", svd->vdev_path, &dvd->vdev_path,
2519 	    dvd->vdev_guid);
2520 
2521 	vdev_update_path("vdev_devid", svd->vdev_devid, &dvd->vdev_devid,
2522 	    dvd->vdev_guid);
2523 
2524 	vdev_update_path("vdev_physpath", svd->vdev_physpath,
2525 	    &dvd->vdev_physpath, dvd->vdev_guid);
2526 
2527 	/*
2528 	 * Our enclosure sysfs path may have changed between imports
2529 	 */
2530 	old = dvd->vdev_enc_sysfs_path;
2531 	new = svd->vdev_enc_sysfs_path;
2532 	if ((old != NULL && new == NULL) ||
2533 	    (old == NULL && new != NULL) ||
2534 	    ((old != NULL && new != NULL) && strcmp(new, old) != 0)) {
2535 		zfs_dbgmsg("vdev_copy_path: vdev %llu: vdev_enc_sysfs_path "
2536 		    "changed from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
2537 		    old, new);
2538 
2539 		if (dvd->vdev_enc_sysfs_path)
2540 			spa_strfree(dvd->vdev_enc_sysfs_path);
2541 
2542 		if (svd->vdev_enc_sysfs_path) {
2543 			dvd->vdev_enc_sysfs_path = spa_strdup(
2544 			    svd->vdev_enc_sysfs_path);
2545 		} else {
2546 			dvd->vdev_enc_sysfs_path = NULL;
2547 		}
2548 	}
2549 }
2550 
2551 /*
2552  * Recursively copy vdev paths from one vdev to another. Source and destination
2553  * vdev trees must have same geometry otherwise return error. Intended to copy
2554  * paths from userland config into MOS config.
2555  */
2556 int
2557 vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd)
2558 {
2559 	if ((svd->vdev_ops == &vdev_missing_ops) ||
2560 	    (svd->vdev_ishole && dvd->vdev_ishole) ||
2561 	    (dvd->vdev_ops == &vdev_indirect_ops))
2562 		return (0);
2563 
2564 	if (svd->vdev_ops != dvd->vdev_ops) {
2565 		vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s",
2566 		    svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type);
2567 		return (SET_ERROR(EINVAL));
2568 	}
2569 
2570 	if (svd->vdev_guid != dvd->vdev_guid) {
2571 		vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != "
2572 		    "%llu)", (u_longlong_t)svd->vdev_guid,
2573 		    (u_longlong_t)dvd->vdev_guid);
2574 		return (SET_ERROR(EINVAL));
2575 	}
2576 
2577 	if (svd->vdev_children != dvd->vdev_children) {
2578 		vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: "
2579 		    "%llu != %llu", (u_longlong_t)svd->vdev_children,
2580 		    (u_longlong_t)dvd->vdev_children);
2581 		return (SET_ERROR(EINVAL));
2582 	}
2583 
2584 	for (uint64_t i = 0; i < svd->vdev_children; i++) {
2585 		int error = vdev_copy_path_strict(svd->vdev_child[i],
2586 		    dvd->vdev_child[i]);
2587 		if (error != 0)
2588 			return (error);
2589 	}
2590 
2591 	if (svd->vdev_ops->vdev_op_leaf)
2592 		vdev_copy_path_impl(svd, dvd);
2593 
2594 	return (0);
2595 }
2596 
2597 static void
2598 vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd)
2599 {
2600 	ASSERT(stvd->vdev_top == stvd);
2601 	ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id);
2602 
2603 	for (uint64_t i = 0; i < dvd->vdev_children; i++) {
2604 		vdev_copy_path_search(stvd, dvd->vdev_child[i]);
2605 	}
2606 
2607 	if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd))
2608 		return;
2609 
2610 	/*
2611 	 * The idea here is that while a vdev can shift positions within
2612 	 * a top vdev (when replacing, attaching mirror, etc.) it cannot
2613 	 * step outside of it.
2614 	 */
2615 	vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid);
2616 
2617 	if (vd == NULL || vd->vdev_ops != dvd->vdev_ops)
2618 		return;
2619 
2620 	ASSERT(vd->vdev_ops->vdev_op_leaf);
2621 
2622 	vdev_copy_path_impl(vd, dvd);
2623 }
2624 
2625 /*
2626  * Recursively copy vdev paths from one root vdev to another. Source and
2627  * destination vdev trees may differ in geometry. For each destination leaf
2628  * vdev, search a vdev with the same guid and top vdev id in the source.
2629  * Intended to copy paths from userland config into MOS config.
2630  */
2631 void
2632 vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd)
2633 {
2634 	uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children);
2635 	ASSERT(srvd->vdev_ops == &vdev_root_ops);
2636 	ASSERT(drvd->vdev_ops == &vdev_root_ops);
2637 
2638 	for (uint64_t i = 0; i < children; i++) {
2639 		vdev_copy_path_search(srvd->vdev_child[i],
2640 		    drvd->vdev_child[i]);
2641 	}
2642 }
2643 
2644 /*
2645  * Close a virtual device.
2646  */
2647 void
2648 vdev_close(vdev_t *vd)
2649 {
2650 	vdev_t *pvd = vd->vdev_parent;
2651 	spa_t *spa __maybe_unused = vd->vdev_spa;
2652 
2653 	ASSERT(vd != NULL);
2654 	ASSERT(vd->vdev_open_thread == curthread ||
2655 	    spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2656 
2657 	/*
2658 	 * If our parent is reopening, then we are as well, unless we are
2659 	 * going offline.
2660 	 */
2661 	if (pvd != NULL && pvd->vdev_reopening)
2662 		vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);
2663 
2664 	vd->vdev_ops->vdev_op_close(vd);
2665 
2666 	/*
2667 	 * We record the previous state before we close it, so that if we are
2668 	 * doing a reopen(), we don't generate FMA ereports if we notice that
2669 	 * it's still faulted.
2670 	 */
2671 	vd->vdev_prevstate = vd->vdev_state;
2672 
2673 	if (vd->vdev_offline)
2674 		vd->vdev_state = VDEV_STATE_OFFLINE;
2675 	else
2676 		vd->vdev_state = VDEV_STATE_CLOSED;
2677 	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
2678 }
2679 
2680 void
2681 vdev_hold(vdev_t *vd)
2682 {
2683 	spa_t *spa = vd->vdev_spa;
2684 
2685 	ASSERT(spa_is_root(spa));
2686 	if (spa->spa_state == POOL_STATE_UNINITIALIZED)
2687 		return;
2688 
2689 	for (int c = 0; c < vd->vdev_children; c++)
2690 		vdev_hold(vd->vdev_child[c]);
2691 
2692 	if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_hold != NULL)
2693 		vd->vdev_ops->vdev_op_hold(vd);
2694 }
2695 
2696 void
2697 vdev_rele(vdev_t *vd)
2698 {
2699 	ASSERT(spa_is_root(vd->vdev_spa));
2700 	for (int c = 0; c < vd->vdev_children; c++)
2701 		vdev_rele(vd->vdev_child[c]);
2702 
2703 	if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_rele != NULL)
2704 		vd->vdev_ops->vdev_op_rele(vd);
2705 }
2706 
2707 /*
2708  * Reopen all interior vdevs and any unopened leaves.  We don't actually
2709  * reopen leaf vdevs which had previously been opened as they might deadlock
2710  * on the spa_config_lock.  Instead we only obtain the leaf's physical size.
2711  * If the leaf has never been opened then open it, as usual.
2712  */
2713 void
2714 vdev_reopen(vdev_t *vd)
2715 {
2716 	spa_t *spa = vd->vdev_spa;
2717 
2718 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2719 
2720 	/* set the reopening flag unless we're taking the vdev offline */
2721 	vd->vdev_reopening = !vd->vdev_offline;
2722 	vdev_close(vd);
2723 	(void) vdev_open(vd);
2724 
2725 	/*
2726 	 * Call vdev_validate() here to make sure we have the same device.
2727 	 * Otherwise, a device with an invalid label could be successfully
2728 	 * opened in response to vdev_reopen().
2729 	 */
2730 	if (vd->vdev_aux) {
2731 		(void) vdev_validate_aux(vd);
2732 		if (vdev_readable(vd) && vdev_writeable(vd) &&
2733 		    vd->vdev_aux == &spa->spa_l2cache) {
2734 			/*
2735 			 * In case the vdev is present we should evict all ARC
2736 			 * buffers and pointers to log blocks and reclaim their
2737 			 * space before restoring its contents to L2ARC.
2738 			 */
2739 			if (l2arc_vdev_present(vd)) {
2740 				l2arc_rebuild_vdev(vd, B_TRUE);
2741 			} else {
2742 				l2arc_add_vdev(spa, vd);
2743 			}
2744 			spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
2745 			spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
2746 		}
2747 	} else {
2748 		(void) vdev_validate(vd);
2749 	}
2750 
2751 	/*
2752 	 * Recheck if resilver is still needed and cancel any
2753 	 * scheduled resilver if resilver is unneeded.
2754 	 */
2755 	if (!vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL) &&
2756 	    spa->spa_async_tasks & SPA_ASYNC_RESILVER) {
2757 		mutex_enter(&spa->spa_async_lock);
2758 		spa->spa_async_tasks &= ~SPA_ASYNC_RESILVER;
2759 		mutex_exit(&spa->spa_async_lock);
2760 	}
2761 
2762 	/*
2763 	 * Reassess parent vdev's health.
2764 	 */
2765 	vdev_propagate_state(vd);
2766 }
2767 
2768 int
2769 vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
2770 {
2771 	int error;
2772 
2773 	/*
2774 	 * Normally, partial opens (e.g. of a mirror) are allowed.
2775 	 * For a create, however, we want to fail the request if
2776 	 * there are any components we can't open.
2777 	 */
2778 	error = vdev_open(vd);
2779 
2780 	if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
2781 		vdev_close(vd);
2782 		return (error ? error : SET_ERROR(ENXIO));
2783 	}
2784 
2785 	/*
2786 	 * Recursively load DTLs and initialize all labels.
2787 	 */
2788 	if ((error = vdev_dtl_load(vd)) != 0 ||
2789 	    (error = vdev_label_init(vd, txg, isreplacing ?
2790 	    VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
2791 		vdev_close(vd);
2792 		return (error);
2793 	}
2794 
2795 	return (0);
2796 }
2797 
2798 void
2799 vdev_metaslab_set_size(vdev_t *vd)
2800 {
2801 	uint64_t asize = vd->vdev_asize;
2802 	uint64_t ms_count = asize >> zfs_vdev_default_ms_shift;
2803 	uint64_t ms_shift;
2804 
2805 	/*
2806 	 * There are two dimensions to the metaslab sizing calculation:
2807 	 * the size of the metaslab and the count of metaslabs per vdev.
2808 	 *
2809 	 * The default values used below are a good balance between memory
2810 	 * usage (larger metaslab size means more memory needed for loaded
2811 	 * metaslabs; more metaslabs means more memory needed for the
2812 	 * metaslab_t structs), metaslab load time (larger metaslabs take
2813 	 * longer to load), and metaslab sync time (more metaslabs means
2814 	 * more time spent syncing all of them).
2815 	 *
2816 	 * In general, we aim for zfs_vdev_default_ms_count (200) metaslabs.
2817 	 * The range of the dimensions are as follows:
2818 	 *
2819 	 *	2^29 <= ms_size  <= 2^34
2820 	 *	  16 <= ms_count <= 131,072
2821 	 *
2822 	 * On the lower end of vdev sizes, we aim for metaslabs sizes of
2823 	 * at least 512MB (2^29) to minimize fragmentation effects when
2824 	 * testing with smaller devices.  However, the count constraint
2825 	 * of at least 16 metaslabs will override this minimum size goal.
2826 	 *
2827 	 * On the upper end of vdev sizes, we aim for a maximum metaslab
2828 	 * size of 16GB.  However, we will cap the total count to 2^17
2829 	 * metaslabs to keep our memory footprint in check and let the
2830 	 * metaslab size grow from there if that limit is hit.
2831 	 *
2832 	 * The net effect of applying the above constraints is summarized below.
2833 	 *
2834 	 *   vdev size       metaslab count
2835 	 *  --------------|-----------------
2836 	 *      < 8GB        ~16
2837 	 *  8GB   - 100GB   one per 512MB
2838 	 *  100GB - 3TB     ~200
2839 	 *  3TB   - 2PB     one per 16GB
2840 	 *      > 2PB       ~131,072
2841 	 *  --------------------------------
2842 	 *
2843 	 *  Finally, note that all of the above calculate the initial
2844 	 *  number of metaslabs. Expanding a top-level vdev will result
2845 	 *  in additional metaslabs being allocated making it possible
2846 	 *  to exceed the zfs_vdev_ms_count_limit.
2847 	 */
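	/*
	 * As a worked example: a 100GB vdev gives asize >> 29 == 200
	 * metaslabs of 512MB, right at the default target.  A 32TB vdev
	 * would want highbit64(32TB / 200) == 38, which exceeds
	 * zfs_vdev_max_ms_shift, so it is capped at 34, yielding 2048
	 * metaslabs of 16GB each.
	 */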
2848 
2849 	if (ms_count < zfs_vdev_min_ms_count)
2850 		ms_shift = highbit64(asize / zfs_vdev_min_ms_count);
2851 	else if (ms_count > zfs_vdev_default_ms_count)
2852 		ms_shift = highbit64(asize / zfs_vdev_default_ms_count);
2853 	else
2854 		ms_shift = zfs_vdev_default_ms_shift;
2855 
2856 	if (ms_shift < SPA_MAXBLOCKSHIFT) {
2857 		ms_shift = SPA_MAXBLOCKSHIFT;
2858 	} else if (ms_shift > zfs_vdev_max_ms_shift) {
2859 		ms_shift = zfs_vdev_max_ms_shift;
2860 		/* cap the total count to constrain memory footprint */
2861 		if ((asize >> ms_shift) > zfs_vdev_ms_count_limit)
2862 			ms_shift = highbit64(asize / zfs_vdev_ms_count_limit);
2863 	}
2864 
2865 	vd->vdev_ms_shift = ms_shift;
2866 	ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT);
2867 }
2868 
2869 void
2870 vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
2871 {
2872 	ASSERT(vd == vd->vdev_top);
2873 	/* indirect vdevs don't have metaslabs or dtls */
2874 	ASSERT(vdev_is_concrete(vd) || flags == 0);
2875 	ASSERT(ISP2(flags));
2876 	ASSERT(spa_writeable(vd->vdev_spa));
2877 
2878 	if (flags & VDD_METASLAB)
2879 		(void) txg_list_add(&vd->vdev_ms_list, arg, txg);
2880 
2881 	if (flags & VDD_DTL)
2882 		(void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
2883 
2884 	(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
2885 }
2886 
2887 void
2888 vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
2889 {
2890 	for (int c = 0; c < vd->vdev_children; c++)
2891 		vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
2892 
2893 	if (vd->vdev_ops->vdev_op_leaf)
2894 		vdev_dirty(vd->vdev_top, flags, vd, txg);
2895 }
2896 
2897 /*
2898  * DTLs.
2899  *
2900  * A vdev's DTL (dirty time log) is the set of transaction groups for which
2901  * the vdev has less than perfect replication.  There are four kinds of DTL:
2902  *
2903  * DTL_MISSING: txgs for which the vdev has no valid copies of the data
2904  *
2905  * DTL_PARTIAL: txgs for which data is available, but not fully replicated
2906  *
2907  * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
2908  *	scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
2909  *	txgs that was scrubbed.
2910  *
2911  * DTL_OUTAGE: txgs which cannot currently be read, whether due to
2912  *	persistent errors or just some device being offline.
2913  *	Unlike the other three, the DTL_OUTAGE map is not generally
2914  *	maintained; it's only computed when needed, typically to
2915  *	determine whether a device can be detached.
2916  *
2917  * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
2918  * either has the data or it doesn't.
2919  *
2920  * For interior vdevs such as mirror and RAID-Z the picture is more complex.
2921  * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
2922  * if any child is less than fully replicated, then so is its parent.
2923  * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
2924  * comprising only those txgs which appear in more than 'maxfaults' children;
2925  * those are the txgs we don't have enough replication to read.  For example,
2926  * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
2927  * thus, its DTL_MISSING consists of the set of txgs that appear in more than
2928  * two child DTL_MISSING maps.
2929  *
2930  * It should be clear from the above that to compute the DTLs and outage maps
2931  * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
2932  * Therefore, that is all we keep on disk.  When loading the pool, or after
2933  * a configuration change, we generate all other DTLs from first principles.
2934  */
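/*
 * A small example of the interior-vdev rule: for a two-way mirror, a txg
 * lands in the parent's DTL_MISSING only when it appears in both children's
 * outage maps (minref == vdev_children == 2 in vdev_dtl_reassess()), whereas
 * a raidz2 vdev requires the txg to appear in three or more children
 * (minref == nparity + 1).
 */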
2935 void
2936 vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
2937 {
2938 	range_tree_t *rt = vd->vdev_dtl[t];
2939 
2940 	ASSERT(t < DTL_TYPES);
2941 	ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2942 	ASSERT(spa_writeable(vd->vdev_spa));
2943 
2944 	mutex_enter(&vd->vdev_dtl_lock);
2945 	if (!range_tree_contains(rt, txg, size))
2946 		range_tree_add(rt, txg, size);
2947 	mutex_exit(&vd->vdev_dtl_lock);
2948 }
2949 
2950 boolean_t
2951 vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
2952 {
2953 	range_tree_t *rt = vd->vdev_dtl[t];
2954 	boolean_t dirty = B_FALSE;
2955 
2956 	ASSERT(t < DTL_TYPES);
2957 	ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2958 
2959 	/*
2960 	 * While we are loading the pool, the DTLs have not been loaded yet.
2961 	 * This isn't a problem, but it can result in devices being tried
2962 	 * which are known not to have the data.  In that case, the import
2963 	 * relies on the checksum to ensure that we get the right data.
2964 	 * Note that while importing we are only reading the MOS, which is
2965 	 * always checksummed.
2966 	 */
2967 	mutex_enter(&vd->vdev_dtl_lock);
2968 	if (!range_tree_is_empty(rt))
2969 		dirty = range_tree_contains(rt, txg, size);
2970 	mutex_exit(&vd->vdev_dtl_lock);
2971 
2972 	return (dirty);
2973 }
2974 
2975 boolean_t
2976 vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
2977 {
2978 	range_tree_t *rt = vd->vdev_dtl[t];
2979 	boolean_t empty;
2980 
2981 	mutex_enter(&vd->vdev_dtl_lock);
2982 	empty = range_tree_is_empty(rt);
2983 	mutex_exit(&vd->vdev_dtl_lock);
2984 
2985 	return (empty);
2986 }
2987 
2988 /*
2989  * Check if the txg falls within the range which must be
2990  * resilvered.  DVAs outside this range can always be skipped.
2991  */
2992 boolean_t
2993 vdev_default_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
2994     uint64_t phys_birth)
2995 {
2996 	(void) dva, (void) psize;
2997 
2998 	/* Set by sequential resilver. */
2999 	if (phys_birth == TXG_UNKNOWN)
3000 		return (B_TRUE);
3001 
3002 	return (vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1));
3003 }
3004 
3005 /*
3006  * Returns B_TRUE if the vdev determines the DVA needs to be resilvered.
3007  */
3008 boolean_t
3009 vdev_dtl_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
3010     uint64_t phys_birth)
3011 {
3012 	ASSERT(vd != vd->vdev_spa->spa_root_vdev);
3013 
3014 	if (vd->vdev_ops->vdev_op_need_resilver == NULL ||
3015 	    vd->vdev_ops->vdev_op_leaf)
3016 		return (B_TRUE);
3017 
3018 	return (vd->vdev_ops->vdev_op_need_resilver(vd, dva, psize,
3019 	    phys_birth));
3020 }
3021 
3022 /*
3023  * Returns the lowest txg in the DTL range.
3024  */
3025 static uint64_t
3026 vdev_dtl_min(vdev_t *vd)
3027 {
3028 	ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
3029 	ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
3030 	ASSERT0(vd->vdev_children);
3031 
3032 	return (range_tree_min(vd->vdev_dtl[DTL_MISSING]) - 1);
3033 }
3034 
3035 /*
3036  * Returns the highest txg in the DTL.
3037  */
3038 static uint64_t
3039 vdev_dtl_max(vdev_t *vd)
3040 {
3041 	ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
3042 	ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
3043 	ASSERT0(vd->vdev_children);
3044 
3045 	return (range_tree_max(vd->vdev_dtl[DTL_MISSING]));
3046 }
3047 
3048 /*
3049  * Determine if a resilvering vdev should remove any DTL entries from
3050  * its range. If the vdev was resilvering for the entire duration of the
3051  * scan then it should excise that range from its DTLs. Otherwise, this
3052  * vdev is considered partially resilvered and should leave its DTL
3053  * entries intact. The comment in vdev_dtl_reassess() describes how we
3054  * excise the DTLs.
3055  */
3056 static boolean_t
3057 vdev_dtl_should_excise(vdev_t *vd, boolean_t rebuild_done)
3058 {
3059 	ASSERT0(vd->vdev_children);
3060 
3061 	if (vd->vdev_state < VDEV_STATE_DEGRADED)
3062 		return (B_FALSE);
3063 
3064 	if (vd->vdev_resilver_deferred)
3065 		return (B_FALSE);
3066 
3067 	if (range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
3068 		return (B_TRUE);
3069 
3070 	if (rebuild_done) {
3071 		vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
3072 		vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
3073 
3074 		/* Rebuild not initiated by attach */
3075 		if (vd->vdev_rebuild_txg == 0)
3076 			return (B_TRUE);
3077 
3078 		/*
3079 		 * When a rebuild completes without error then all missing data
3080 		 * up to the rebuild max txg has been reconstructed and the DTL
3081 		 * is eligible for excision.
3082 		 */
3083 		if (vrp->vrp_rebuild_state == VDEV_REBUILD_COMPLETE &&
3084 		    vdev_dtl_max(vd) <= vrp->vrp_max_txg) {
3085 			ASSERT3U(vrp->vrp_min_txg, <=, vdev_dtl_min(vd));
3086 			ASSERT3U(vrp->vrp_min_txg, <, vd->vdev_rebuild_txg);
3087 			ASSERT3U(vd->vdev_rebuild_txg, <=, vrp->vrp_max_txg);
3088 			return (B_TRUE);
3089 		}
3090 	} else {
3091 		dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
3092 		dsl_scan_phys_t *scnp __maybe_unused = &scn->scn_phys;
3093 
3094 		/* Resilver not initiated by attach */
3095 		if (vd->vdev_resilver_txg == 0)
3096 			return (B_TRUE);
3097 
3098 		/*
3099 		 * When a resilver is initiated the scan will assign the
3100 		 * scn_max_txg value to the highest txg value that exists
3101 		 * in all DTLs. If this device's max DTL is not part of this
3102 		 * scan (i.e. it is not in the range (scn_min_txg, scn_max_txg]),
3103 		 * then it is not eligible for excision.
3104 		 */
3105 		if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
3106 			ASSERT3U(scnp->scn_min_txg, <=, vdev_dtl_min(vd));
3107 			ASSERT3U(scnp->scn_min_txg, <, vd->vdev_resilver_txg);
3108 			ASSERT3U(vd->vdev_resilver_txg, <=, scnp->scn_max_txg);
3109 			return (B_TRUE);
3110 		}
3111 	}
3112 
3113 	return (B_FALSE);
3114 }
3115 
3116 /*
3117  * Reassess DTLs after a config change or scrub completion. If txg == 0 no
3118  * write operations will be issued to the pool.
3119  */
3120 void
3121 vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
3122     boolean_t scrub_done, boolean_t rebuild_done)
3123 {
3124 	spa_t *spa = vd->vdev_spa;
3125 	avl_tree_t reftree;
3126 	int minref;
3127 
3128 	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
3129 
3130 	for (int c = 0; c < vd->vdev_children; c++)
3131 		vdev_dtl_reassess(vd->vdev_child[c], txg,
3132 		    scrub_txg, scrub_done, rebuild_done);
3133 
3134 	if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
3135 		return;
3136 
3137 	if (vd->vdev_ops->vdev_op_leaf) {
3138 		dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
3139 		vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
3140 		boolean_t check_excise = B_FALSE;
3141 		boolean_t wasempty = B_TRUE;
3142 
3143 		mutex_enter(&vd->vdev_dtl_lock);
3144 
3145 		/*
3146 		 * If requested, pretend the scan or rebuild completed cleanly.
3147 		 */
3148 		if (zfs_scan_ignore_errors) {
3149 			if (scn != NULL)
3150 				scn->scn_phys.scn_errors = 0;
3151 			if (vr != NULL)
3152 				vr->vr_rebuild_phys.vrp_errors = 0;
3153 		}
3154 
3155 		if (scrub_txg != 0 &&
3156 		    !range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
3157 			wasempty = B_FALSE;
3158 			zfs_dbgmsg("guid:%llu txg:%llu scrub:%llu started:%d "
3159 			    "dtl:%llu/%llu errors:%llu",
3160 			    (u_longlong_t)vd->vdev_guid, (u_longlong_t)txg,
3161 			    (u_longlong_t)scrub_txg, spa->spa_scrub_started,
3162 			    (u_longlong_t)vdev_dtl_min(vd),
3163 			    (u_longlong_t)vdev_dtl_max(vd),
3164 			    (u_longlong_t)(scn ? scn->scn_phys.scn_errors : 0));
3165 		}
3166 
3167 		/*
3168 		 * If we've completed a scrub/resilver or a rebuild cleanly
3169 		 * then determine if this vdev should remove any DTLs. We
3170 		 * only want to excise regions on vdevs that were available
3171 		 * during the entire duration of this scan.
3172 		 */
3173 		if (rebuild_done &&
3174 		    vr != NULL && vr->vr_rebuild_phys.vrp_errors == 0) {
3175 			check_excise = B_TRUE;
3176 		} else {
3177 			if (spa->spa_scrub_started ||
3178 			    (scn != NULL && scn->scn_phys.scn_errors == 0)) {
3179 				check_excise = B_TRUE;
3180 			}
3181 		}
3182 
3183 		if (scrub_txg && check_excise &&
3184 		    vdev_dtl_should_excise(vd, rebuild_done)) {
3185 			/*
3186 			 * We completed a scrub, resilver or rebuild up to
3187 			 * scrub_txg.  If we did it without rebooting, then
3188 			 * the scrub dtl will be valid, so excise the old
3189 			 * region and fold in the scrub dtl.  Otherwise,
3190 			 * leave the dtl as-is if there was an error.
3191 			 *
3192 			 * There's a little trick here: to excise the beginning
3193 			 * of the DTL_MISSING map, we put it into a reference
3194 			 * tree and then add a segment with refcnt -1 that
3195 			 * covers the range [0, scrub_txg).  This means
3196 			 * that each txg in that range has refcnt -1 or 0.
3197 			 * We then add DTL_SCRUB with a refcnt of 2, so that
3198 			 * entries in the range [0, scrub_txg) will have a
3199 			 * positive refcnt -- either 1 or 2.  We then convert
3200 			 * the reference tree into the new DTL_MISSING map.
3201 			 */
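			/*
			 * Concretely: a missing txg below scrub_txg that was
			 * repaired nets 1 - 1 == 0 references and is excised;
			 * one the scrub could not repair (also in DTL_SCRUB)
			 * nets 1 - 1 + 2 == 2 and is kept; and any missing txg
			 * at or above scrub_txg keeps its +1 and is kept too.
			 */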
3202 			space_reftree_create(&reftree);
3203 			space_reftree_add_map(&reftree,
3204 			    vd->vdev_dtl[DTL_MISSING], 1);
3205 			space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
3206 			space_reftree_add_map(&reftree,
3207 			    vd->vdev_dtl[DTL_SCRUB], 2);
3208 			space_reftree_generate_map(&reftree,
3209 			    vd->vdev_dtl[DTL_MISSING], 1);
3210 			space_reftree_destroy(&reftree);
3211 
3212 			if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
3213 				zfs_dbgmsg("update DTL_MISSING:%llu/%llu",
3214 				    (u_longlong_t)vdev_dtl_min(vd),
3215 				    (u_longlong_t)vdev_dtl_max(vd));
3216 			} else if (!wasempty) {
3217 				zfs_dbgmsg("DTL_MISSING is now empty");
3218 			}
3219 		}
3220 		range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
3221 		range_tree_walk(vd->vdev_dtl[DTL_MISSING],
3222 		    range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
3223 		if (scrub_done)
3224 			range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
3225 		range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
3226 		if (!vdev_readable(vd))
3227 			range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
3228 		else
3229 			range_tree_walk(vd->vdev_dtl[DTL_MISSING],
3230 			    range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
3231 
3232 		/*
3233 		 * If the vdev was resilvering or rebuilding and no longer
3234 		 * has any DTLs then reset the appropriate flag and dirty
3235 		 * the top level so that we persist the change.
3236 		 */
3237 		if (txg != 0 &&
3238 		    range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
3239 		    range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) {
3240 			if (vd->vdev_rebuild_txg != 0) {
3241 				vd->vdev_rebuild_txg = 0;
3242 				vdev_config_dirty(vd->vdev_top);
3243 			} else if (vd->vdev_resilver_txg != 0) {
3244 				vd->vdev_resilver_txg = 0;
3245 				vdev_config_dirty(vd->vdev_top);
3246 			}
3247 		}
3248 
3249 		mutex_exit(&vd->vdev_dtl_lock);
3250 
3251 		if (txg != 0)
3252 			vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
3253 		return;
3254 	}
3255 
3256 	mutex_enter(&vd->vdev_dtl_lock);
3257 	for (int t = 0; t < DTL_TYPES; t++) {
3258 		/* account for child's outage in parent's missing map */
3259 		int s = (t == DTL_MISSING) ? DTL_OUTAGE : t;
3260 		if (t == DTL_SCRUB)
3261 			continue;			/* leaf vdevs only */
3262 		if (t == DTL_PARTIAL)
3263 			minref = 1;			/* i.e. non-zero */
3264 		else if (vdev_get_nparity(vd) != 0)
3265 			minref = vdev_get_nparity(vd) + 1; /* RAID-Z, dRAID */
3266 		else
3267 			minref = vd->vdev_children;	/* any kind of mirror */
3268 		space_reftree_create(&reftree);
3269 		for (int c = 0; c < vd->vdev_children; c++) {
3270 			vdev_t *cvd = vd->vdev_child[c];
3271 			mutex_enter(&cvd->vdev_dtl_lock);
3272 			space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1);
3273 			mutex_exit(&cvd->vdev_dtl_lock);
3274 		}
3275 		space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref);
3276 		space_reftree_destroy(&reftree);
3277 	}
3278 	mutex_exit(&vd->vdev_dtl_lock);
3279 }
3280 
3281 /*
3282  * Iterate over all the vdevs except spare, and post kobj events
3283  */
3284 void
3285 vdev_post_kobj_evt(vdev_t *vd)
3286 {
3287 	if (vd->vdev_ops->vdev_op_kobj_evt_post &&
3288 	    vd->vdev_kobj_flag == B_FALSE) {
3289 		vd->vdev_kobj_flag = B_TRUE;
3290 		vd->vdev_ops->vdev_op_kobj_evt_post(vd);
3291 	}
3292 
3293 	for (int c = 0; c < vd->vdev_children; c++)
3294 		vdev_post_kobj_evt(vd->vdev_child[c]);
3295 }
3296 
3297 /*
3298  * Iterate over all the vdevs except spare, and clear kobj events
3299  */
3300 void
3301 vdev_clear_kobj_evt(vdev_t *vd)
3302 {
3303 	vd->vdev_kobj_flag = B_FALSE;
3304 
3305 	for (int c = 0; c < vd->vdev_children; c++)
3306 		vdev_clear_kobj_evt(vd->vdev_child[c]);
3307 }
3308 
3309 int
3310 vdev_dtl_load(vdev_t *vd)
3311 {
3312 	spa_t *spa = vd->vdev_spa;
3313 	objset_t *mos = spa->spa_meta_objset;
3314 	range_tree_t *rt;
3315 	int error = 0;
3316 
3317 	if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
3318 		ASSERT(vdev_is_concrete(vd));
3319 
3320 		/*
3321 		 * If the dtl cannot be sync'd there is no need to open it.
3322 		 */
3323 		if (spa->spa_mode == SPA_MODE_READ && !spa->spa_read_spacemaps)
3324 			return (0);
3325 
3326 		error = space_map_open(&vd->vdev_dtl_sm, mos,
3327 		    vd->vdev_dtl_object, 0, -1ULL, 0);
3328 		if (error)
3329 			return (error);
3330 		ASSERT(vd->vdev_dtl_sm != NULL);
3331 
3332 		rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
3333 		error = space_map_load(vd->vdev_dtl_sm, rt, SM_ALLOC);
3334 		if (error == 0) {
3335 			mutex_enter(&vd->vdev_dtl_lock);
3336 			range_tree_walk(rt, range_tree_add,
3337 			    vd->vdev_dtl[DTL_MISSING]);
3338 			mutex_exit(&vd->vdev_dtl_lock);
3339 		}
3340 
3341 		range_tree_vacate(rt, NULL, NULL);
3342 		range_tree_destroy(rt);
3343 
3344 		return (error);
3345 	}
3346 
3347 	for (int c = 0; c < vd->vdev_children; c++) {
3348 		error = vdev_dtl_load(vd->vdev_child[c]);
3349 		if (error != 0)
3350 			break;
3351 	}
3352 
3353 	return (error);
3354 }
3355 
3356 static void
3357 vdev_zap_allocation_data(vdev_t *vd, dmu_tx_t *tx)
3358 {
3359 	spa_t *spa = vd->vdev_spa;
3360 	objset_t *mos = spa->spa_meta_objset;
3361 	vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
3362 	const char *string;
3363 
3364 	ASSERT(alloc_bias != VDEV_BIAS_NONE);
3365 
3366 	string =
3367 	    (alloc_bias == VDEV_BIAS_LOG) ? VDEV_ALLOC_BIAS_LOG :
3368 	    (alloc_bias == VDEV_BIAS_SPECIAL) ? VDEV_ALLOC_BIAS_SPECIAL :
3369 	    (alloc_bias == VDEV_BIAS_DEDUP) ? VDEV_ALLOC_BIAS_DEDUP : NULL;
3370 
3371 	ASSERT(string != NULL);
3372 	VERIFY0(zap_add(mos, vd->vdev_top_zap, VDEV_TOP_ZAP_ALLOCATION_BIAS,
3373 	    1, strlen(string) + 1, string, tx));
3374 
3375 	if (alloc_bias == VDEV_BIAS_SPECIAL || alloc_bias == VDEV_BIAS_DEDUP) {
3376 		spa_activate_allocation_classes(spa, tx);
3377 	}
3378 }
3379 
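/*
 * Destroy a per-vdev ZAP and remove its entry from the pool's
 * all-vdev-ZAPs directory object.
 */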
3380 void
3381 vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx)
3382 {
3383 	spa_t *spa = vd->vdev_spa;
3384 
3385 	VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx));
3386 	VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
3387 	    zapobj, tx));
3388 }
3389 
3390 uint64_t
3391 vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx)
3392 {
3393 	spa_t *spa = vd->vdev_spa;
3394 	uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA,
3395 	    DMU_OT_NONE, 0, tx);
3396 
3397 	ASSERT(zap != 0);
3398 	VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
3399 	    zap, tx));
3400 
3401 	return (zap);
3402 }
3403 
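/*
 * Create any missing vdev ZAPs for this subtree: a leaf ZAP for each leaf,
 * a top-level ZAP for each top-level vdev (plus its allocation-bias entry),
 * and, when the AVZ_V2 feature is enabled, a root vdev ZAP.  Hole, missing,
 * and removing vdevs are skipped.
 */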
3404 void
3405 vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
3406 {
3407 	if (vd->vdev_ops != &vdev_hole_ops &&
3408 	    vd->vdev_ops != &vdev_missing_ops &&
3409 	    vd->vdev_ops != &vdev_root_ops &&
3410 	    !vd->vdev_top->vdev_removing) {
3411 		if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) {
3412 			vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx);
3413 		}
3414 		if (vd == vd->vdev_top && vd->vdev_top_zap == 0) {
3415 			vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
3416 			if (vd->vdev_alloc_bias != VDEV_BIAS_NONE)
3417 				vdev_zap_allocation_data(vd, tx);
3418 		}
3419 	}
3420 	if (vd->vdev_ops == &vdev_root_ops && vd->vdev_root_zap == 0 &&
3421 	    spa_feature_is_enabled(vd->vdev_spa, SPA_FEATURE_AVZ_V2)) {
3422 		if (!spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2))
3423 			spa_feature_incr(vd->vdev_spa, SPA_FEATURE_AVZ_V2, tx);
3424 		vd->vdev_root_zap = vdev_create_link_zap(vd, tx);
3425 	}
3426 
3427 	for (uint64_t i = 0; i < vd->vdev_children; i++) {
3428 		vdev_construct_zaps(vd->vdev_child[i], tx);
3429 	}
3430 }
3431 
3432 static void
3433 vdev_dtl_sync(vdev_t *vd, uint64_t txg)
3434 {
3435 	spa_t *spa = vd->vdev_spa;
3436 	range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
3437 	objset_t *mos = spa->spa_meta_objset;
3438 	range_tree_t *rtsync;
3439 	dmu_tx_t *tx;
3440 	uint64_t object = space_map_object(vd->vdev_dtl_sm);
3441 
3442 	ASSERT(vdev_is_concrete(vd));
3443 	ASSERT(vd->vdev_ops->vdev_op_leaf);
3444 
3445 	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
3446 
3447 	if (vd->vdev_detached || vd->vdev_top->vdev_removing) {
3448 		mutex_enter(&vd->vdev_dtl_lock);
3449 		space_map_free(vd->vdev_dtl_sm, tx);
3450 		space_map_close(vd->vdev_dtl_sm);
3451 		vd->vdev_dtl_sm = NULL;
3452 		mutex_exit(&vd->vdev_dtl_lock);
3453 
3454 		/*
3455 		 * We only destroy the leaf ZAP for detached leaves or for
3456 		 * removed log devices. Removed data devices handle leaf ZAP
3457 		 * cleanup later, once cancellation is no longer possible.
3458 		 */
3459 		if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached ||
3460 		    vd->vdev_top->vdev_islog)) {
3461 			vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx);
3462 			vd->vdev_leaf_zap = 0;
3463 		}
3464 
3465 		dmu_tx_commit(tx);
3466 		return;
3467 	}
3468 
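	/*
	 * Allocate the on-disk DTL space map on first sync; later syncs
	 * truncate and rewrite the same object below.
	 */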
3469 	if (vd->vdev_dtl_sm == NULL) {
3470 		uint64_t new_object;
3471 
3472 		new_object = space_map_alloc(mos, zfs_vdev_dtl_sm_blksz, tx);
3473 		VERIFY3U(new_object, !=, 0);
3474 
3475 		VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
3476 		    0, -1ULL, 0));
3477 		ASSERT(vd->vdev_dtl_sm != NULL);
3478 	}
3479 
3480 	rtsync = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
3481 
3482 	mutex_enter(&vd->vdev_dtl_lock);
3483 	range_tree_walk(rt, range_tree_add, rtsync);
3484 	mutex_exit(&vd->vdev_dtl_lock);
3485 
3486 	space_map_truncate(vd->vdev_dtl_sm, zfs_vdev_dtl_sm_blksz, tx);
3487 	space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx);
3488 	range_tree_vacate(rtsync, NULL, NULL);
3489 
3490 	range_tree_destroy(rtsync);
3491 
3492 	/*
3493 	 * If the object for the space map has changed then dirty
3494 	 * the top level so that we update the config.
3495 	 */
3496 	if (object != space_map_object(vd->vdev_dtl_sm)) {
3497 		vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, "
3498 		    "new object %llu", (u_longlong_t)txg, spa_name(spa),
3499 		    (u_longlong_t)object,
3500 		    (u_longlong_t)space_map_object(vd->vdev_dtl_sm));
3501 		vdev_config_dirty(vd->vdev_top);
3502 	}
3503 
3504 	dmu_tx_commit(tx);
3505 }
3506 
3507 /*
3508  * Determine whether the specified vdev can be offlined/detached/removed
3509  * without losing data.
3510  */
3511 boolean_t
3512 vdev_dtl_required(vdev_t *vd)
3513 {
3514 	spa_t *spa = vd->vdev_spa;
3515 	vdev_t *tvd = vd->vdev_top;
3516 	uint8_t cant_read = vd->vdev_cant_read;
3517 	boolean_t required;
3518 
3519 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
3520 
3521 	if (vd == spa->spa_root_vdev || vd == tvd)
3522 		return (B_TRUE);
3523 
3524 	/*
3525 	 * Temporarily mark the device as unreadable, and then determine
3526 	 * whether this results in any DTL outages in the top-level vdev.
3527 	 * If not, we can safely offline/detach/remove the device.
3528 	 */
3529 	vd->vdev_cant_read = B_TRUE;
3530 	vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
3531 	required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
3532 	vd->vdev_cant_read = cant_read;
3533 	vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
3534 
3535 	if (!required && zio_injection_enabled) {
3536 		required = !!zio_handle_device_injection(vd, NULL,
3537 		    SET_ERROR(ECHILD));
3538 	}
3539 
3540 	return (required);
3541 }
3542 
3543 /*
3544  * Determine if resilver is needed, and if so the txg range.
3545  */
3546 boolean_t
3547 vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
3548 {
3549 	boolean_t needed = B_FALSE;
3550 	uint64_t thismin = UINT64_MAX;
3551 	uint64_t thismax = 0;
3552 
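	/*
	 * Leaf vdevs report the txg span of their DTL_MISSING tree;
	 * interior vdevs report the union of their children's spans.
	 */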
3553 	if (vd->vdev_children == 0) {
3554 		mutex_enter(&vd->vdev_dtl_lock);
3555 		if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
3556 		    vdev_writeable(vd)) {
3557 
3558 			thismin = vdev_dtl_min(vd);
3559 			thismax = vdev_dtl_max(vd);
3560 			needed = B_TRUE;
3561 		}
3562 		mutex_exit(&vd->vdev_dtl_lock);
3563 	} else {
3564 		for (int c = 0; c < vd->vdev_children; c++) {
3565 			vdev_t *cvd = vd->vdev_child[c];
3566 			uint64_t cmin, cmax;
3567 
3568 			if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
3569 				thismin = MIN(thismin, cmin);
3570 				thismax = MAX(thismax, cmax);
3571 				needed = B_TRUE;
3572 			}
3573 		}
3574 	}
3575 
3576 	if (needed && minp) {
3577 		*minp = thismin;
3578 		*maxp = thismax;
3579 	}
3580 	return (needed);
3581 }
3582 
3583 /*
3584  * Gets the checkpoint space map object from the vdev's ZAP.  On success sm_obj
3585  * will contain either the checkpoint spacemap object or zero if none exists.
3586  * All other errors are returned to the caller.
3587  */
3588 int
3589 vdev_checkpoint_sm_object(vdev_t *vd, uint64_t *sm_obj)
3590 {
3591 	ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
3592 
3593 	if (vd->vdev_top_zap == 0) {
3594 		*sm_obj = 0;
3595 		return (0);
3596 	}
3597 
3598 	int error = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap,
3599 	    VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, sm_obj);
3600 	if (error == ENOENT) {
3601 		*sm_obj = 0;
3602 		error = 0;
3603 	}
3604 
3605 	return (error);
3606 }
3607 
3608 int
3609 vdev_load(vdev_t *vd)
3610 {
3611 	int children = vd->vdev_children;
3612 	int error = 0;
3613 	taskq_t *tq = NULL;
3614 
3615 	/*
3616 	 * It's only worthwhile to use the taskq for the root vdev, because the
3617 	 * slow part is metaslab_init, and that only happens for top-level
3618 	 * vdevs.
3619 	 */
3620 	if (vd->vdev_ops == &vdev_root_ops && vd->vdev_children > 0) {
3621 		tq = taskq_create("vdev_load", children, minclsyspri,
3622 		    children, children, TASKQ_PREPOPULATE);
3623 	}
3624 
3625 	/*
3626 	 * Recursively load all children.
3627 	 */
3628 	for (int c = 0; c < vd->vdev_children; c++) {
3629 		vdev_t *cvd = vd->vdev_child[c];
3630 
3631 		if (tq == NULL || vdev_uses_zvols(cvd)) {
3632 			cvd->vdev_load_error = vdev_load(cvd);
3633 		} else {
3634 			VERIFY(taskq_dispatch(tq, vdev_load_child,
3635 			    cvd, TQ_SLEEP) != TASKQID_INVALID);
3636 		}
3637 	}
3638 
3639 	if (tq != NULL) {
3640 		taskq_wait(tq);
3641 		taskq_destroy(tq);
3642 	}
3643 
3644 	for (int c = 0; c < vd->vdev_children; c++) {
3645 		int error = vd->vdev_child[c]->vdev_load_error;
3646 
3647 		if (error != 0)
3648 			return (error);
3649 	}
3650 
3651 	vdev_set_deflate_ratio(vd);
3652 
3653 	/*
3654 	 * On spa_load path, grab the allocation bias from our zap
3655 	 */
3656 	if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
3657 		spa_t *spa = vd->vdev_spa;
3658 		char bias_str[64];
3659 
3660 		error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
3661 		    VDEV_TOP_ZAP_ALLOCATION_BIAS, 1, sizeof (bias_str),
3662 		    bias_str);
3663 		if (error == 0) {
3664 			ASSERT(vd->vdev_alloc_bias == VDEV_BIAS_NONE);
3665 			vd->vdev_alloc_bias = vdev_derive_alloc_bias(bias_str);
3666 		} else if (error != ENOENT) {
3667 			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3668 			    VDEV_AUX_CORRUPT_DATA);
3669 			vdev_dbgmsg(vd, "vdev_load: zap_lookup(top_zap=%llu) "
3670 			    "failed [error=%d]",
3671 			    (u_longlong_t)vd->vdev_top_zap, error);
3672 			return (error);
3673 		}
3674 	}
3675 
3676 	if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
3677 		spa_t *spa = vd->vdev_spa;
3678 		uint64_t failfast;
3679 
3680 		error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
3681 		    vdev_prop_to_name(VDEV_PROP_FAILFAST), sizeof (failfast),
3682 		    1, &failfast);
3683 		if (error == 0) {
3684 			vd->vdev_failfast = failfast & 1;
3685 		} else if (error == ENOENT) {
3686 			vd->vdev_failfast = vdev_prop_default_numeric(
3687 			    VDEV_PROP_FAILFAST);
3688 		} else {
3689 			vdev_dbgmsg(vd,
3690 			    "vdev_load: zap_lookup(top_zap=%llu) "
3691 			    "failed [error=%d]",
3692 			    (u_longlong_t)vd->vdev_top_zap, error);
3693 		}
3694 	}
3695 
3696 	/*
3697 	 * Load any rebuild state from the top-level vdev zap.
3698 	 */
3699 	if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
3700 		error = vdev_rebuild_load(vd);
3701 		if (error && error != ENOTSUP) {
3702 			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3703 			    VDEV_AUX_CORRUPT_DATA);
3704 			vdev_dbgmsg(vd, "vdev_load: vdev_rebuild_load "
3705 			    "failed [error=%d]", error);
3706 			return (error);
3707 		}
3708 	}
3709 
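	/*
	 * Load per-vdev tunable properties (checksum_n/t, io_n/t,
	 * slow_io_n/t) from whichever ZAP this vdev has; ENOENT simply
	 * means the property was never set and is ignored.
	 */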
3710 	if (vd->vdev_top_zap != 0 || vd->vdev_leaf_zap != 0) {
3711 		uint64_t zapobj;
3712 
3713 		if (vd->vdev_top_zap != 0)
3714 			zapobj = vd->vdev_top_zap;
3715 		else
3716 			zapobj = vd->vdev_leaf_zap;
3717 
3718 		error = vdev_prop_get_int(vd, VDEV_PROP_CHECKSUM_N,
3719 		    &vd->vdev_checksum_n);
3720 		if (error && error != ENOENT)
3721 			vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3722 			    "failed [error=%d]", (u_longlong_t)zapobj, error);
3723 
3724 		error = vdev_prop_get_int(vd, VDEV_PROP_CHECKSUM_T,
3725 		    &vd->vdev_checksum_t);
3726 		if (error && error != ENOENT)
3727 			vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3728 			    "failed [error=%d]", (u_longlong_t)zapobj, error);
3729 
3730 		error = vdev_prop_get_int(vd, VDEV_PROP_IO_N,
3731 		    &vd->vdev_io_n);
3732 		if (error && error != ENOENT)
3733 			vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3734 			    "failed [error=%d]", (u_longlong_t)zapobj, error);
3735 
3736 		error = vdev_prop_get_int(vd, VDEV_PROP_IO_T,
3737 		    &vd->vdev_io_t);
3738 		if (error && error != ENOENT)
3739 			vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3740 			    "failed [error=%d]", (u_longlong_t)zapobj, error);
3741 
3742 		error = vdev_prop_get_int(vd, VDEV_PROP_SLOW_IO_N,
3743 		    &vd->vdev_slow_io_n);
3744 		if (error && error != ENOENT)
3745 			vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3746 			    "failed [error=%d]", (u_longlong_t)zapobj, error);
3747 
3748 		error = vdev_prop_get_int(vd, VDEV_PROP_SLOW_IO_T,
3749 		    &vd->vdev_slow_io_t);
3750 		if (error && error != ENOENT)
3751 			vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3752 			    "failed [error=%d]", (u_longlong_t)zapobj, error);
3753 	}
3754 
3755 	/*
3756 	 * If this is a top-level vdev, initialize its metaslabs.
3757 	 */
3758 	if (vd == vd->vdev_top && vdev_is_concrete(vd)) {
3759 		vdev_metaslab_group_create(vd);
3760 
3761 		if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
3762 			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3763 			    VDEV_AUX_CORRUPT_DATA);
3764 			vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, "
3765 			    "asize=%llu", (u_longlong_t)vd->vdev_ashift,
3766 			    (u_longlong_t)vd->vdev_asize);
3767 			return (SET_ERROR(ENXIO));
3768 		}
3769 
3770 		error = vdev_metaslab_init(vd, 0);
3771 		if (error != 0) {
3772 			vdev_dbgmsg(vd, "vdev_load: metaslab_init failed "
3773 			    "[error=%d]", error);
3774 			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3775 			    VDEV_AUX_CORRUPT_DATA);
3776 			return (error);
3777 		}
3778 
3779 		uint64_t checkpoint_sm_obj;
3780 		error = vdev_checkpoint_sm_object(vd, &checkpoint_sm_obj);
3781 		if (error == 0 && checkpoint_sm_obj != 0) {
3782 			objset_t *mos = spa_meta_objset(vd->vdev_spa);
3783 			ASSERT(vd->vdev_asize != 0);
3784 			ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
3785 
3786 			error = space_map_open(&vd->vdev_checkpoint_sm,
3787 			    mos, checkpoint_sm_obj, 0, vd->vdev_asize,
3788 			    vd->vdev_ashift);
3789 			if (error != 0) {
3790 				vdev_dbgmsg(vd, "vdev_load: space_map_open "
3791 				    "failed for checkpoint spacemap (obj %llu) "
3792 				    "[error=%d]",
3793 				    (u_longlong_t)checkpoint_sm_obj, error);
3794 				return (error);
3795 			}
3796 			ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
3797 
3798 			/*
3799 			 * Since the checkpoint_sm contains free entries
3800 			 * exclusively we can use space_map_allocated() to
3801 			 * indicate the cumulative checkpointed space that
3802 			 * has been freed.
3803 			 */
3804 			vd->vdev_stat.vs_checkpoint_space =
3805 			    -space_map_allocated(vd->vdev_checkpoint_sm);
3806 			vd->vdev_spa->spa_checkpoint_info.sci_dspace +=
3807 			    vd->vdev_stat.vs_checkpoint_space;
3808 		} else if (error != 0) {
3809 			vdev_dbgmsg(vd, "vdev_load: failed to retrieve "
3810 			    "checkpoint space map object from vdev ZAP "
3811 			    "[error=%d]", error);
3812 			return (error);
3813 		}
3814 	}
3815 
3816 	/*
3817 	 * If this is a leaf vdev, load its DTL.
3818 	 */
3819 	if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
3820 		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3821 		    VDEV_AUX_CORRUPT_DATA);
3822 		vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
3823 		    "[error=%d]", error);
3824 		return (error);
3825 	}
3826 
3827 	uint64_t obsolete_sm_object;
3828 	error = vdev_obsolete_sm_object(vd, &obsolete_sm_object);
3829 	if (error == 0 && obsolete_sm_object != 0) {
3830 		objset_t *mos = vd->vdev_spa->spa_meta_objset;
3831 		ASSERT(vd->vdev_asize != 0);
3832 		ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
3833 
3834 		if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
3835 		    obsolete_sm_object, 0, vd->vdev_asize, 0))) {
3836 			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3837 			    VDEV_AUX_CORRUPT_DATA);
3838 			vdev_dbgmsg(vd, "vdev_load: space_map_open failed for "
3839 			    "obsolete spacemap (obj %llu) [error=%d]",
3840 			    (u_longlong_t)obsolete_sm_object, error);
3841 			return (error);
3842 		}
3843 	} else if (error != 0) {
3844 		vdev_dbgmsg(vd, "vdev_load: failed to retrieve obsolete "
3845 		    "space map object from vdev ZAP [error=%d]", error);
3846 		return (error);
3847 	}
3848 
3849 	return (0);
3850 }
3851 
3852 /*
3853  * The special vdev case is used for hot spares and l2cache devices.  Its
3854  * sole purpose is to set the vdev state for the associated vdev.  To do this,
3855  * we make sure that we can open the underlying device, then try to read the
3856  * label, and make sure that the label is sane and that it hasn't been
3857  * repurposed to another pool.
3858  */
3859 int
3860 vdev_validate_aux(vdev_t *vd)
3861 {
3862 	nvlist_t *label;
3863 	uint64_t guid, version;
3864 	uint64_t state;
3865 
3866 	if (!vdev_readable(vd))
3867 		return (0);
3868 
3869 	if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
3870 		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
3871 		    VDEV_AUX_CORRUPT_DATA);
3872 		return (-1);
3873 	}
3874 
3875 	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
3876 	    !SPA_VERSION_IS_SUPPORTED(version) ||
3877 	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
3878 	    guid != vd->vdev_guid ||
3879 	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
3880 		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
3881 		    VDEV_AUX_CORRUPT_DATA);
3882 		nvlist_free(label);
3883 		return (-1);
3884 	}
3885 
3886 	/*
3887 	 * We don't actually check the pool state here.  If it's in fact in
3888 	 * use by another pool, we update this fact on the fly when requested.
3889 	 */
3890 	nvlist_free(label);
3891 	return (0);
3892 }
3893 
3894 static void
3895 vdev_destroy_ms_flush_data(vdev_t *vd, dmu_tx_t *tx)
3896 {
3897 	objset_t *mos = spa_meta_objset(vd->vdev_spa);
3898 
3899 	if (vd->vdev_top_zap == 0)
3900 		return;
3901 
3902 	uint64_t object = 0;
3903 	int err = zap_lookup(mos, vd->vdev_top_zap,
3904 	    VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, &object);
3905 	if (err == ENOENT)
3906 		return;
3907 	VERIFY0(err);
3908 
3909 	VERIFY0(dmu_object_free(mos, object, tx));
3910 	VERIFY0(zap_remove(mos, vd->vdev_top_zap,
3911 	    VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, tx));
3912 }
3913 
3914 /*
3915  * Free the objects used to store this vdev's spacemaps, and the array
3916  * that points to them.
3917  */
3918 void
3919 vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx)
3920 {
3921 	if (vd->vdev_ms_array == 0)
3922 		return;
3923 
3924 	objset_t *mos = vd->vdev_spa->spa_meta_objset;
3925 	uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift;
3926 	size_t array_bytes = array_count * sizeof (uint64_t);
3927 	uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP);
3928 	VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0,
3929 	    array_bytes, smobj_array, 0));
3930 
3931 	for (uint64_t i = 0; i < array_count; i++) {
3932 		uint64_t smobj = smobj_array[i];
3933 		if (smobj == 0)
3934 			continue;
3935 
3936 		space_map_free_obj(mos, smobj, tx);
3937 	}
3938 
3939 	kmem_free(smobj_array, array_bytes);
3940 	VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx));
3941 	vdev_destroy_ms_flush_data(vd, tx);
3942 	vd->vdev_ms_array = 0;
3943 }
3944 
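/*
 * Destroy the metadata (spacemaps and top-level ZAP) of an empty log
 * device that is being removed.
 */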
3945 static void
3946 vdev_remove_empty_log(vdev_t *vd, uint64_t txg)
3947 {
3948 	spa_t *spa = vd->vdev_spa;
3949 
3950 	ASSERT(vd->vdev_islog);
3951 	ASSERT(vd == vd->vdev_top);
3952 	ASSERT3U(txg, ==, spa_syncing_txg(spa));
3953 
3954 	dmu_tx_t *tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
3955 
3956 	vdev_destroy_spacemaps(vd, tx);
3957 	if (vd->vdev_top_zap != 0) {
3958 		vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx);
3959 		vd->vdev_top_zap = 0;
3960 	}
3961 
3962 	dmu_tx_commit(tx);
3963 }
3964 
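/*
 * Finish metaslab syncing for this vdev in the given txg and, if any
 * metaslabs were dirtied, reassess its metaslab groups.
 */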
3965 void
3966 vdev_sync_done(vdev_t *vd, uint64_t txg)
3967 {
3968 	metaslab_t *msp;
3969 	boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));
3970 
3971 	ASSERT(vdev_is_concrete(vd));
3972 
3973 	while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
3974 	    != NULL)
3975 		metaslab_sync_done(msp, txg);
3976 
3977 	if (reassess) {
3978 		metaslab_sync_reassess(vd->vdev_mg);
3979 		if (vd->vdev_log_mg != NULL)
3980 			metaslab_sync_reassess(vd->vdev_log_mg);
3981 	}
3982 }
3983 
3984 void
3985 vdev_sync(vdev_t *vd, uint64_t txg)
3986 {
3987 	spa_t *spa = vd->vdev_spa;
3988 	vdev_t *lvd;
3989 	metaslab_t *msp;
3990 
3991 	ASSERT3U(txg, ==, spa->spa_syncing_txg);
3992 	dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
3993 	if (range_tree_space(vd->vdev_obsolete_segments) > 0) {
3994 		ASSERT(vd->vdev_removing ||
3995 		    vd->vdev_ops == &vdev_indirect_ops);
3996 
3997 		vdev_indirect_sync_obsolete(vd, tx);
3998 
3999 		/*
4000 		 * If the vdev is indirect, it can't have dirty
4001 		 * metaslabs or DTLs.
4002 		 */
4003 		if (vd->vdev_ops == &vdev_indirect_ops) {
4004 			ASSERT(txg_list_empty(&vd->vdev_ms_list, txg));
4005 			ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg));
4006 			dmu_tx_commit(tx);
4007 			return;
4008 		}
4009 	}
4010 
4011 	ASSERT(vdev_is_concrete(vd));
4012 
4013 	if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 &&
4014 	    !vd->vdev_removing) {
4015 		ASSERT(vd == vd->vdev_top);
4016 		ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
4017 		vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
4018 		    DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
4019 		ASSERT(vd->vdev_ms_array != 0);
4020 		vdev_config_dirty(vd);
4021 	}
4022 
4023 	while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
4024 		metaslab_sync(msp, txg);
4025 		(void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
4026 	}
4027 
4028 	while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
4029 		vdev_dtl_sync(lvd, txg);
4030 
4031 	/*
4032 	 * If this is an empty log device being removed, destroy the
4033 	 * metadata associated with it.
4034 	 */
4035 	if (vd->vdev_islog && vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing)
4036 		vdev_remove_empty_log(vd, txg);
4037 
4038 	(void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
4039 	dmu_tx_commit(tx);
4040 }
4041 
4042 uint64_t
4043 vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
4044 {
4045 	return (vd->vdev_ops->vdev_op_asize(vd, psize));
4046 }
4047 
4048 /*
4049  * Mark the given vdev faulted.  A faulted vdev behaves as if the device could
4050  * not be opened, and no I/O is attempted.
4051  */
4052 int
4053 vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
4054 {
4055 	vdev_t *vd, *tvd;
4056 
4057 	spa_vdev_state_enter(spa, SCL_NONE);
4058 
4059 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4060 		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4061 
4062 	if (!vd->vdev_ops->vdev_op_leaf)
4063 		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
4064 
4065 	tvd = vd->vdev_top;
4066 
4067 	/*
4068 	 * If user did a 'zpool offline -f' then make the fault persist across
4069 	 * reboots.
4070 	 */
4071 	if (aux == VDEV_AUX_EXTERNAL_PERSIST) {
4072 		/*
4073 		 * There are two kinds of forced faults: temporary and
4074 		 * persistent.  Temporary faults go away at pool import, while
4075 		 * persistent faults stay set.  Both types of faults can be
4076 		 * cleared with a zpool clear.
4077 		 *
4078 		 * We tell if a vdev is persistently faulted by looking at the
4079 		 * ZPOOL_CONFIG_AUX_STATE nvpair.  If it's set to "external" at
4080 		 * import then it's a persistent fault.  Otherwise, it's
4081 		 * temporary.  We get ZPOOL_CONFIG_AUX_STATE set to "external"
4082 		 * by setting vd.vdev_stat.vs_aux to VDEV_AUX_EXTERNAL.  This
4083 		 * tells vdev_config_generate() (which gets run later) to set
4084 		 * ZPOOL_CONFIG_AUX_STATE to "external" in the nvlist.
4085 		 */
4086 		vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
4087 		vd->vdev_tmpoffline = B_FALSE;
4088 		aux = VDEV_AUX_EXTERNAL;
4089 	} else {
4090 		vd->vdev_tmpoffline = B_TRUE;
4091 	}
4092 
4093 	/*
4094 	 * We don't directly use the aux state here, but if we do a
4095 	 * vdev_reopen(), we need this value to be present to remember why we
4096 	 * were faulted.
4097 	 */
4098 	vd->vdev_label_aux = aux;
4099 
4100 	/*
4101 	 * Faulted state takes precedence over degraded.
4102 	 */
4103 	vd->vdev_delayed_close = B_FALSE;
4104 	vd->vdev_faulted = 1ULL;
4105 	vd->vdev_degraded = 0ULL;
4106 	vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);
4107 
4108 	/*
4109 	 * If this device has the only valid copy of the data, then
4110 	 * back off and simply mark the vdev as degraded instead.
4111 	 */
4112 	if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
4113 		vd->vdev_degraded = 1ULL;
4114 		vd->vdev_faulted = 0ULL;
4115 
4116 		/*
4117 		 * If we reopen the device and it's not dead, only then do we
4118 		 * mark it degraded.
4119 		 */
4120 		vdev_reopen(tvd);
4121 
4122 		if (vdev_readable(vd))
4123 			vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
4124 	}
4125 
4126 	return (spa_vdev_state_exit(spa, vd, 0));
4127 }
4128 
4129 /*
4130  * Mark the given vdev degraded.  A degraded vdev is purely an indication to the
4131  * user that something is wrong.  The vdev continues to operate as normal as far
4132  * as I/O is concerned.
4133  */
4134 int
4135 vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
4136 {
4137 	vdev_t *vd;
4138 
4139 	spa_vdev_state_enter(spa, SCL_NONE);
4140 
4141 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4142 		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4143 
4144 	if (!vd->vdev_ops->vdev_op_leaf)
4145 		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
4146 
4147 	/*
4148 	 * If the vdev is already faulted, then don't do anything.
4149 	 */
4150 	if (vd->vdev_faulted || vd->vdev_degraded)
4151 		return (spa_vdev_state_exit(spa, NULL, 0));
4152 
4153 	vd->vdev_degraded = 1ULL;
4154 	if (!vdev_is_dead(vd))
4155 		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
4156 		    aux);
4157 
4158 	return (spa_vdev_state_exit(spa, vd, 0));
4159 }
4160 
4161 int
4162 vdev_remove_wanted(spa_t *spa, uint64_t guid)
4163 {
4164 	vdev_t *vd;
4165 
4166 	spa_vdev_state_enter(spa, SCL_NONE);
4167 
4168 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4169 		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4170 
4171 	/*
4172 	 * If the vdev is already removed, or expanding which can trigger
4173 	 * repartition add/remove events, then don't do anything.
4174 	 */
4175 	if (vd->vdev_removed || vd->vdev_expanding)
4176 		return (spa_vdev_state_exit(spa, NULL, 0));
4177 
4178 	/*
4179 	 * Confirm the vdev has been removed, otherwise don't do anything.
4180 	 */
4181 	if (vd->vdev_ops->vdev_op_leaf && !zio_wait(vdev_probe(vd, NULL)))
4182 		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(EEXIST)));
4183 
4184 	vd->vdev_remove_wanted = B_TRUE;
4185 	spa_async_request(spa, SPA_ASYNC_REMOVE);
4186 
4187 	return (spa_vdev_state_exit(spa, vd, 0));
4188 }
4189 
4190 
4191 /*
4192  * Online the given vdev.
4193  *
4194  * If 'ZFS_ONLINE_UNSPARE' is set, it implies two things.  First, any attached
4195  * spare device should be detached when the device finishes resilvering.
4196  * Second, the online should be treated like a 'test' online case, so no FMA
4197  * events are generated if the device fails to open.
4198  */
4199 int
4200 vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
4201 {
4202 	vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
4203 	boolean_t wasoffline;
4204 	vdev_state_t oldstate;
4205 
4206 	spa_vdev_state_enter(spa, SCL_NONE);
4207 
4208 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4209 		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4210 
4211 	if (!vd->vdev_ops->vdev_op_leaf)
4212 		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
4213 
4214 	wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
4215 	oldstate = vd->vdev_state;
4216 
4217 	tvd = vd->vdev_top;
4218 	vd->vdev_offline = B_FALSE;
4219 	vd->vdev_tmpoffline = B_FALSE;
4220 	vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
4221 	vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
4222 
4223 	/* XXX - L2ARC 1.0 does not support expansion */
4224 	if (!vd->vdev_aux) {
4225 		for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
4226 			pvd->vdev_expanding = !!((flags & ZFS_ONLINE_EXPAND) ||
4227 			    spa->spa_autoexpand);
4228 		vd->vdev_expansion_time = gethrestime_sec();
4229 	}
4230 
4231 	vdev_reopen(tvd);
4232 	vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
4233 
4234 	if (!vd->vdev_aux) {
4235 		for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
4236 			pvd->vdev_expanding = B_FALSE;
4237 	}
4238 
4239 	if (newstate)
4240 		*newstate = vd->vdev_state;
4241 	if ((flags & ZFS_ONLINE_UNSPARE) &&
4242 	    !vdev_is_dead(vd) && vd->vdev_parent &&
4243 	    vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
4244 	    vd->vdev_parent->vdev_child[0] == vd)
4245 		vd->vdev_unspare = B_TRUE;
4246 
4247 	if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
4248 
4249 		/* XXX - L2ARC 1.0 does not support expansion */
4250 		if (vd->vdev_aux)
4251 			return (spa_vdev_state_exit(spa, vd, ENOTSUP));
4252 		spa->spa_ccw_fail_time = 0;
4253 		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
4254 	}
4255 
4256 	/* Restart initializing if necessary */
4257 	mutex_enter(&vd->vdev_initialize_lock);
4258 	if (vdev_writeable(vd) &&
4259 	    vd->vdev_initialize_thread == NULL &&
4260 	    vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) {
4261 		(void) vdev_initialize(vd);
4262 	}
4263 	mutex_exit(&vd->vdev_initialize_lock);
4264 
4265 	/*
4266 	 * Restart trimming if necessary. We do not restart trimming for cache
4267 	 * devices here. This is triggered by l2arc_rebuild_vdev()
4268 	 * asynchronously for the whole device or in l2arc_evict() as it evicts
4269 	 * space for upcoming writes.
4270 	 */
4271 	mutex_enter(&vd->vdev_trim_lock);
4272 	if (vdev_writeable(vd) && !vd->vdev_isl2cache &&
4273 	    vd->vdev_trim_thread == NULL &&
4274 	    vd->vdev_trim_state == VDEV_TRIM_ACTIVE) {
4275 		(void) vdev_trim(vd, vd->vdev_trim_rate, vd->vdev_trim_partial,
4276 		    vd->vdev_trim_secure);
4277 	}
4278 	mutex_exit(&vd->vdev_trim_lock);
4279 
4280 	if (wasoffline ||
4281 	    (oldstate < VDEV_STATE_DEGRADED &&
4282 	    vd->vdev_state >= VDEV_STATE_DEGRADED)) {
4283 		spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE);
4284 
4285 		/*
4286 		 * Asynchronously detach spare vdev if resilver or
4287 		 * rebuild is not required
4288 		 */
4289 		if (vd->vdev_unspare &&
4290 		    !dsl_scan_resilvering(spa->spa_dsl_pool) &&
4291 		    !dsl_scan_resilver_scheduled(spa->spa_dsl_pool) &&
4292 		    !vdev_rebuild_active(tvd))
4293 			spa_async_request(spa, SPA_ASYNC_DETACH_SPARE);
4294 	}
4295 	return (spa_vdev_state_exit(spa, vd, 0));
4296 }
4297 
4298 static int
4299 vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
4300 {
4301 	vdev_t *vd, *tvd;
4302 	int error = 0;
4303 	uint64_t generation;
4304 	metaslab_group_t *mg;
4305 
4306 top:
4307 	spa_vdev_state_enter(spa, SCL_ALLOC);
4308 
4309 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4310 		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4311 
4312 	if (!vd->vdev_ops->vdev_op_leaf)
4313 		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
4314 
4315 	if (vd->vdev_ops == &vdev_draid_spare_ops)
4316 		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
4317 
4318 	tvd = vd->vdev_top;
4319 	mg = tvd->vdev_mg;
4320 	generation = spa->spa_config_generation + 1;
4321 
4322 	/*
4323 	 * If the device isn't already offline, try to offline it.
4324 	 */
4325 	if (!vd->vdev_offline) {
4326 		/*
4327 		 * If this device has the only valid copy of some data,
4328 		 * don't allow it to be offlined. Log devices are always
4329 		 * expendable.
4330 		 */
4331 		if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
4332 		    vdev_dtl_required(vd))
4333 			return (spa_vdev_state_exit(spa, NULL,
4334 			    SET_ERROR(EBUSY)));
4335 
4336 		/*
4337 		 * If the top-level is a slog and it has had allocations
4338 		 * then proceed.  We check that the vdev's metaslab group
4339 		 * is not NULL since it's possible that we may have just
4340 		 * added this vdev but not yet initialized its metaslabs.
4341 		 */
4342 		if (tvd->vdev_islog && mg != NULL) {
4343 			/*
4344 			 * Prevent any future allocations.
4345 			 */
4346 			ASSERT3P(tvd->vdev_log_mg, ==, NULL);
4347 			metaslab_group_passivate(mg);
4348 			(void) spa_vdev_state_exit(spa, vd, 0);
4349 
4350 			error = spa_reset_logs(spa);
4351 
4352 			/*
4353 			 * If the log device was successfully reset but has
4354 			 * checkpointed data, do not offline it.
4355 			 */
4356 			if (error == 0 &&
4357 			    tvd->vdev_checkpoint_sm != NULL) {
4358 				ASSERT3U(space_map_allocated(
4359 				    tvd->vdev_checkpoint_sm), !=, 0);
4360 				error = ZFS_ERR_CHECKPOINT_EXISTS;
4361 			}
4362 
4363 			spa_vdev_state_enter(spa, SCL_ALLOC);
4364 
4365 			/*
4366 			 * Check to see if the config has changed.
4367 			 */
4368 			if (error || generation != spa->spa_config_generation) {
4369 				metaslab_group_activate(mg);
4370 				if (error)
4371 					return (spa_vdev_state_exit(spa,
4372 					    vd, error));
4373 				(void) spa_vdev_state_exit(spa, vd, 0);
4374 				goto top;
4375 			}
4376 			ASSERT0(tvd->vdev_stat.vs_alloc);
4377 		}
4378 
4379 		/*
4380 		 * Offline this device and reopen its top-level vdev.
4381 		 * If the top-level vdev is a log device then just offline
4382 		 * it. Otherwise, if this action results in the top-level
4383 		 * vdev becoming unusable, undo it and fail the request.
4384 		 */
4385 		vd->vdev_offline = B_TRUE;
4386 		vdev_reopen(tvd);
4387 
4388 		if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
4389 		    vdev_is_dead(tvd)) {
4390 			vd->vdev_offline = B_FALSE;
4391 			vdev_reopen(tvd);
4392 			return (spa_vdev_state_exit(spa, NULL,
4393 			    SET_ERROR(EBUSY)));
4394 		}
4395 
4396 		/*
4397 		 * Add the device back into the metaslab rotor so that
4398 		 * once we online the device it's open for business.
4399 		 */
4400 		if (tvd->vdev_islog && mg != NULL)
4401 			metaslab_group_activate(mg);
4402 	}
4403 
4404 	vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
4405 
4406 	return (spa_vdev_state_exit(spa, vd, 0));
4407 }
4408 
4409 int
4410 vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
4411 {
4412 	int error;
4413 
4414 	mutex_enter(&spa->spa_vdev_top_lock);
4415 	error = vdev_offline_locked(spa, guid, flags);
4416 	mutex_exit(&spa->spa_vdev_top_lock);
4417 
4418 	return (error);
4419 }
4420 
4421 /*
4422  * Clear the error counts associated with this vdev.  Unlike vdev_online() and
4423  * vdev_offline(), we assume the spa config is locked.  We also clear all
4424  * children.  If 'vd' is NULL, then the user wants to clear all vdevs.
4425  */
4426 void
4427 vdev_clear(spa_t *spa, vdev_t *vd)
4428 {
4429 	vdev_t *rvd = spa->spa_root_vdev;
4430 
4431 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
4432 
4433 	if (vd == NULL)
4434 		vd = rvd;
4435 
4436 	vd->vdev_stat.vs_read_errors = 0;
4437 	vd->vdev_stat.vs_write_errors = 0;
4438 	vd->vdev_stat.vs_checksum_errors = 0;
4439 	vd->vdev_stat.vs_slow_ios = 0;
4440 
4441 	for (int c = 0; c < vd->vdev_children; c++)
4442 		vdev_clear(spa, vd->vdev_child[c]);
4443 
4444 	/*
4445 	 * It makes no sense to "clear" an indirect or removed vdev.
4446 	 */
4447 	if (!vdev_is_concrete(vd) || vd->vdev_removed)
4448 		return;
4449 
4450 	/*
4451 	 * If we're in the FAULTED state or have experienced failed I/O, then
4452 	 * clear the persistent state and attempt to reopen the device.  We
4453 	 * also mark the vdev config dirty, so that the new faulted state is
4454 	 * written out to disk.
4455 	 */
4456 	if (vd->vdev_faulted || vd->vdev_degraded ||
4457 	    !vdev_readable(vd) || !vdev_writeable(vd)) {
4458 		/*
4459 		 * When reopening in response to a clear event, it may be due to
4460 		 * a fmadm repair request.  In this case, if the device is
4461 		 * still broken, we want to still post the ereport again.
4462 		 */
4463 		vd->vdev_forcefault = B_TRUE;
4464 
4465 		vd->vdev_faulted = vd->vdev_degraded = 0ULL;
4466 		vd->vdev_cant_read = B_FALSE;
4467 		vd->vdev_cant_write = B_FALSE;
4468 		vd->vdev_stat.vs_aux = 0;
4469 
4470 		vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
4471 
4472 		vd->vdev_forcefault = B_FALSE;
4473 
4474 		if (vd != rvd && vdev_writeable(vd->vdev_top))
4475 			vdev_state_dirty(vd->vdev_top);
4476 
4477 		/* If a resilver isn't required, check if vdevs can be culled */
4478 		if (vd->vdev_aux == NULL && !vdev_is_dead(vd) &&
4479 		    !dsl_scan_resilvering(spa->spa_dsl_pool) &&
4480 		    !dsl_scan_resilver_scheduled(spa->spa_dsl_pool))
4481 			spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
4482 
4483 		spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
4484 	}
4485 
4486 	/*
4487 	 * When clearing a FMA-diagnosed fault, we always want to
4488 	 * unspare the device, as we assume that the original spare was
4489 	 * done in response to the FMA fault.
4490 	 */
4491 	if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
4492 	    vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
4493 	    vd->vdev_parent->vdev_child[0] == vd)
4494 		vd->vdev_unspare = B_TRUE;
4495 
4496 	/* Clear recent error events cache (i.e. duplicate events tracking) */
4497 	zfs_ereport_clear(spa, vd);
4498 }
4499 
4500 boolean_t
4501 vdev_is_dead(vdev_t *vd)
4502 {
4503 	/*
4504 	 * Holes and missing devices are always considered "dead".
4505 	 * This simplifies the code since we don't have to check for
4506 	 * these types of devices in the various code paths.
4507 	 * Instead we rely on the fact that we skip over dead devices
4508 	 * before issuing I/O to them.
4509 	 */
4510 	return (vd->vdev_state < VDEV_STATE_DEGRADED ||
4511 	    vd->vdev_ops == &vdev_hole_ops ||
4512 	    vd->vdev_ops == &vdev_missing_ops);
4513 }
4514 
4515 boolean_t
4516 vdev_readable(vdev_t *vd)
4517 {
4518 	return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
4519 }
4520 
4521 boolean_t
4522 vdev_writeable(vdev_t *vd)
4523 {
4524 	return (!vdev_is_dead(vd) && !vd->vdev_cant_write &&
4525 	    vdev_is_concrete(vd));
4526 }
4527 
4528 boolean_t
4529 vdev_allocatable(vdev_t *vd)
4530 {
4531 	uint64_t state = vd->vdev_state;
4532 
4533 	/*
4534 	 * We currently allow allocations from vdevs which may be in the
4535 	 * process of reopening (i.e. VDEV_STATE_CLOSED). If the device
4536 	 * fails to reopen then we'll catch it later when we're holding
4537 	 * the proper locks.  Note that we have to get the vdev state
4538 	 * in a local variable because although it changes atomically,
4539 	 * we're asking two separate questions about it.
4540 	 */
4541 	return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
4542 	    !vd->vdev_cant_write && vdev_is_concrete(vd) &&
4543 	    vd->vdev_mg->mg_initialized);
4544 }
4545 
4546 boolean_t
4547 vdev_accessible(vdev_t *vd, zio_t *zio)
4548 {
4549 	ASSERT(zio->io_vd == vd);
4550 
4551 	if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
4552 		return (B_FALSE);
4553 
4554 	if (zio->io_type == ZIO_TYPE_READ)
4555 		return (!vd->vdev_cant_read);
4556 
4557 	if (zio->io_type == ZIO_TYPE_WRITE)
4558 		return (!vd->vdev_cant_write);
4559 
4560 	return (B_TRUE);
4561 }
4562 
4563 static void
4564 vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs)
4565 {
4566 	/*
4567 	 * Exclude the dRAID spare when aggregating to avoid double counting
4568 	 * the ops and bytes.  These IOs are counted by the physical leaves.
4569 	 */
4570 	if (cvd->vdev_ops == &vdev_draid_spare_ops)
4571 		return;
4572 
4573 	for (int t = 0; t < VS_ZIO_TYPES; t++) {
4574 		vs->vs_ops[t] += cvs->vs_ops[t];
4575 		vs->vs_bytes[t] += cvs->vs_bytes[t];
4576 	}
4577 
4578 	cvs->vs_scan_removing = cvd->vdev_removing;
4579 }
4580 
4581 /*
4582  * Get extended stats
4583  */
4584 static void
4585 vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx)
4586 {
4587 	(void) cvd;
4588 
4589 	int t, b;
4590 	for (t = 0; t < ZIO_TYPES; t++) {
4591 		for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++)
4592 			vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b];
4593 
4594 		for (b = 0; b < ARRAY_SIZE(vsx->vsx_total_histo[0]); b++) {
4595 			vsx->vsx_total_histo[t][b] +=
4596 			    cvsx->vsx_total_histo[t][b];
4597 		}
4598 	}
4599 
4600 	for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
4601 		for (b = 0; b < ARRAY_SIZE(vsx->vsx_queue_histo[0]); b++) {
4602 			vsx->vsx_queue_histo[t][b] +=
4603 			    cvsx->vsx_queue_histo[t][b];
4604 		}
4605 		vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t];
4606 		vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t];
4607 
4608 		for (b = 0; b < ARRAY_SIZE(vsx->vsx_ind_histo[0]); b++)
4609 			vsx->vsx_ind_histo[t][b] += cvsx->vsx_ind_histo[t][b];
4610 
4611 		for (b = 0; b < ARRAY_SIZE(vsx->vsx_agg_histo[0]); b++)
4612 			vsx->vsx_agg_histo[t][b] += cvsx->vsx_agg_histo[t][b];
4613 	}
4614 
4615 }
4616 
4617 boolean_t
4618 vdev_is_spacemap_addressable(vdev_t *vd)
4619 {
4620 	if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_SPACEMAP_V2))
4621 		return (B_TRUE);
4622 
4623 	/*
4624 	 * If double-word space map entries are not enabled we assume
4625 	 * 47 bits of the space map entry are dedicated to the entry's
4626 	 * offset (see SM_OFFSET_BITS in space_map.h). We then use that
4627 	 * to calculate the maximum address that can be described by a
4628 	 * space map entry for the given device.
4629 	 */
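	/* E.g. with ashift=9 this allows offsets up to 1ULL << 56 (64 PiB). */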
4630 	uint64_t shift = vd->vdev_ashift + SM_OFFSET_BITS;
4631 
4632 	if (shift >= 63) /* detect potential overflow */
4633 		return (B_TRUE);
4634 
4635 	return (vd->vdev_asize < (1ULL << shift));
4636 }
4637 
4638 /*
4639  * Get statistics for the given vdev.
4640  */
4641 static void
4642 vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
4643 {
4644 	int t;
4645 	/*
4646 	 * If we're getting stats on the root vdev, aggregate the I/O counts
4647 	 * over all top-level vdevs (i.e. the direct children of the root).
4648 	 */
4649 	if (!vd->vdev_ops->vdev_op_leaf) {
4650 		if (vs) {
4651 			memset(vs->vs_ops, 0, sizeof (vs->vs_ops));
4652 			memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes));
4653 		}
4654 		if (vsx)
4655 			memset(vsx, 0, sizeof (*vsx));
4656 
4657 		for (int c = 0; c < vd->vdev_children; c++) {
4658 			vdev_t *cvd = vd->vdev_child[c];
4659 			vdev_stat_t *cvs = &cvd->vdev_stat;
4660 			vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex;
4661 
4662 			vdev_get_stats_ex_impl(cvd, cvs, cvsx);
4663 			if (vs)
4664 				vdev_get_child_stat(cvd, vs, cvs);
4665 			if (vsx)
4666 				vdev_get_child_stat_ex(cvd, vsx, cvsx);
4667 		}
4668 	} else {
4669 		/*
4670 		 * We're a leaf.  Just copy our ZIO active queue stats in.  The
4671 		 * other leaf stats are updated in vdev_stat_update().
4672 		 */
4673 		if (!vsx)
4674 			return;
4675 
4676 		memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex));
4677 
4678 		for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
4679 			vsx->vsx_active_queue[t] = vd->vdev_queue.vq_cactive[t];
4680 			vsx->vsx_pend_queue[t] = vdev_queue_class_length(vd, t);
4681 		}
4682 	}
4683 }
4684 
4685 void
4686 vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
4687 {
4688 	vdev_t *tvd = vd->vdev_top;
4689 	mutex_enter(&vd->vdev_stat_lock);
4690 	if (vs) {
4691 		memcpy(vs, &vd->vdev_stat, sizeof (*vs));
4692 		vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
4693 		vs->vs_state = vd->vdev_state;
4694 		vs->vs_rsize = vdev_get_min_asize(vd);
4695 
4696 		if (vd->vdev_ops->vdev_op_leaf) {
4697 			vs->vs_pspace = vd->vdev_psize;
4698 			vs->vs_rsize += VDEV_LABEL_START_SIZE +
4699 			    VDEV_LABEL_END_SIZE;
4700 			/*
4701 			 * Report initializing progress. Since we don't
4702 			 * have the initializing locks held, this is only
4703 			 * an estimate (although a fairly accurate one).
4704 			 */
4705 			vs->vs_initialize_bytes_done =
4706 			    vd->vdev_initialize_bytes_done;
4707 			vs->vs_initialize_bytes_est =
4708 			    vd->vdev_initialize_bytes_est;
4709 			vs->vs_initialize_state = vd->vdev_initialize_state;
4710 			vs->vs_initialize_action_time =
4711 			    vd->vdev_initialize_action_time;
4712 
4713 			/*
4714 			 * Report manual TRIM progress. Since we don't have
4715 			 * the manual TRIM locks held, this is only an
4716 			 * estimate (although a fairly accurate one).
4717 			 */
4718 			vs->vs_trim_notsup = !vd->vdev_has_trim;
4719 			vs->vs_trim_bytes_done = vd->vdev_trim_bytes_done;
4720 			vs->vs_trim_bytes_est = vd->vdev_trim_bytes_est;
4721 			vs->vs_trim_state = vd->vdev_trim_state;
4722 			vs->vs_trim_action_time = vd->vdev_trim_action_time;
4723 
4724 			/* Set when there is a deferred resilver. */
4725 			vs->vs_resilver_deferred = vd->vdev_resilver_deferred;
4726 		}
4727 
4728 		/*
4729 		 * Report expandable space on top-level, non-auxiliary devices
4730 		 * only. The expandable space is reported in terms of metaslab
4731 		 * sized units since that determines how much space the pool
4732 		 * can expand.
4733 		 */
4734 		if (vd->vdev_aux == NULL && tvd != NULL) {
4735 			vs->vs_esize = P2ALIGN_TYPED(
4736 			    vd->vdev_max_asize - vd->vdev_asize,
4737 			    1ULL << tvd->vdev_ms_shift, uint64_t);
4738 		}
4739 
4740 		vs->vs_configured_ashift = vd->vdev_top != NULL
4741 		    ? vd->vdev_top->vdev_ashift : vd->vdev_ashift;
4742 		vs->vs_logical_ashift = vd->vdev_logical_ashift;
4743 		if (vd->vdev_physical_ashift <= ASHIFT_MAX)
4744 			vs->vs_physical_ashift = vd->vdev_physical_ashift;
4745 		else
4746 			vs->vs_physical_ashift = 0;
4747 
4748 		/*
4749 		 * Report fragmentation and rebuild progress for top-level,
4750 		 * non-auxiliary, concrete devices.
4751 		 */
4752 		if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
4753 		    vdev_is_concrete(vd)) {
4754 			/*
4755 			 * The vdev fragmentation rating doesn't take into
4756 			 * account the embedded slog metaslab (vdev_log_mg).
4757 			 * Since it's only one metaslab, it would have a tiny
4758 			 * impact on the overall fragmentation.
4759 			 */
4760 			vs->vs_fragmentation = (vd->vdev_mg != NULL) ?
4761 			    vd->vdev_mg->mg_fragmentation : 0;
4762 		}
4763 		vs->vs_noalloc = MAX(vd->vdev_noalloc,
4764 		    tvd ? tvd->vdev_noalloc : 0);
4765 	}
4766 
4767 	vdev_get_stats_ex_impl(vd, vs, vsx);
4768 	mutex_exit(&vd->vdev_stat_lock);
4769 }
4770 
4771 void
4772 vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
4773 {
4774 	return (vdev_get_stats_ex(vd, vs, NULL));
4775 }
4776 
4777 void
4778 vdev_clear_stats(vdev_t *vd)
4779 {
4780 	mutex_enter(&vd->vdev_stat_lock);
4781 	vd->vdev_stat.vs_space = 0;
4782 	vd->vdev_stat.vs_dspace = 0;
4783 	vd->vdev_stat.vs_alloc = 0;
4784 	mutex_exit(&vd->vdev_stat_lock);
4785 }
4786 
4787 void
4788 vdev_scan_stat_init(vdev_t *vd)
4789 {
4790 	vdev_stat_t *vs = &vd->vdev_stat;
4791 
4792 	for (int c = 0; c < vd->vdev_children; c++)
4793 		vdev_scan_stat_init(vd->vdev_child[c]);
4794 
4795 	mutex_enter(&vd->vdev_stat_lock);
4796 	vs->vs_scan_processed = 0;
4797 	mutex_exit(&vd->vdev_stat_lock);
4798 }
4799 
4800 void
4801 vdev_stat_update(zio_t *zio, uint64_t psize)
4802 {
4803 	spa_t *spa = zio->io_spa;
4804 	vdev_t *rvd = spa->spa_root_vdev;
4805 	vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
4806 	vdev_t *pvd;
4807 	uint64_t txg = zio->io_txg;
4808 /* Suppress ASAN false positive */
4809 #ifdef __SANITIZE_ADDRESS__
4810 	vdev_stat_t *vs = vd ? &vd->vdev_stat : NULL;
4811 	vdev_stat_ex_t *vsx = vd ? &vd->vdev_stat_ex : NULL;
4812 #else
4813 	vdev_stat_t *vs = &vd->vdev_stat;
4814 	vdev_stat_ex_t *vsx = &vd->vdev_stat_ex;
4815 #endif
4816 	zio_type_t type = zio->io_type;
4817 	int flags = zio->io_flags;
4818 
4819 	/*
4820 	 * If this i/o is a gang leader, it didn't do any actual work.
4821 	 */
4822 	if (zio->io_gang_tree)
4823 		return;
4824 
4825 	if (zio->io_error == 0) {
4826 		/*
4827 		 * If this is a root i/o, don't count it -- we've already
4828 		 * counted the top-level vdevs, and vdev_get_stats() will
4829 		 * aggregate them when asked.  This reduces contention on
4830 		 * the root vdev_stat_lock and implicitly handles blocks
4831 		 * that compress away to holes, for which there is no i/o.
4832 		 * (Holes never create vdev children, so all the counters
4833 		 * remain zero, which is what we want.)
4834 		 *
4835 		 * Note: this only applies to successful i/o (io_error == 0)
4836 		 * because unlike i/o counts, errors are not additive.
4837 		 * When reading a ditto block, for example, failure of
4838 		 * one top-level vdev does not imply a root-level error.
4839 		 */
4840 		if (vd == rvd)
4841 			return;
4842 
4843 		ASSERT(vd == zio->io_vd);
4844 
4845 		if (flags & ZIO_FLAG_IO_BYPASS)
4846 			return;
4847 
4848 		mutex_enter(&vd->vdev_stat_lock);
4849 
4850 		if (flags & ZIO_FLAG_IO_REPAIR) {
4851 			/*
4852 			 * Repair is the result of a resilver issued by the
4853 			 * scan thread (spa_sync).
4854 			 */
4855 			if (flags & ZIO_FLAG_SCAN_THREAD) {
4856 				dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
4857 				dsl_scan_phys_t *scn_phys = &scn->scn_phys;
4858 				uint64_t *processed = &scn_phys->scn_processed;
4859 
4860 				if (vd->vdev_ops->vdev_op_leaf)
4861 					atomic_add_64(processed, psize);
4862 				vs->vs_scan_processed += psize;
4863 			}
4864 
4865 			/*
4866 			 * Repair is the result of a rebuild issued by the
4867 			 * rebuild thread (vdev_rebuild_thread).  To avoid
4868 			 * double counting repaired bytes the virtual dRAID
4869 			 * spare vdev is excluded from the processed bytes.
4870 			 */
4871 			if (zio->io_priority == ZIO_PRIORITY_REBUILD) {
4872 				vdev_t *tvd = vd->vdev_top;
4873 				vdev_rebuild_t *vr = &tvd->vdev_rebuild_config;
4874 				vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
4875 				uint64_t *rebuilt = &vrp->vrp_bytes_rebuilt;
4876 
4877 				if (vd->vdev_ops->vdev_op_leaf &&
4878 				    vd->vdev_ops != &vdev_draid_spare_ops) {
4879 					atomic_add_64(rebuilt, psize);
4880 				}
4881 				vs->vs_rebuild_processed += psize;
4882 			}
4883 
4884 			if (flags & ZIO_FLAG_SELF_HEAL)
4885 				vs->vs_self_healed += psize;
4886 		}
4887 
4888 		/*
4889 		 * The bytes/ops/histograms are recorded at the leaf level and
4890 		 * aggregated into the higher level vdevs in vdev_get_stats().
4891 		 */
4892 		if (vd->vdev_ops->vdev_op_leaf &&
4893 		    (zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) {
4894 			zio_type_t vs_type = type;
4895 			zio_priority_t priority = zio->io_priority;
4896 
4897 			/*
4898 			 * TRIM ops and bytes are reported to user space as
4899 			 * ZIO_TYPE_IOCTL.  This is done to preserve the
4900 			 * vdev_stat_t structure layout for user space.
4901 			 */
4902 			if (type == ZIO_TYPE_TRIM)
4903 				vs_type = ZIO_TYPE_IOCTL;
4904 
4905 			/*
4906 			 * Solely for the purposes of 'zpool iostat -lqrw'
4907 			 * reporting use the priority to categorize the IO.
4908 			 * Only the following are reported to user space:
4909 			 *
4910 			 *   ZIO_PRIORITY_SYNC_READ,
4911 			 *   ZIO_PRIORITY_SYNC_WRITE,
4912 			 *   ZIO_PRIORITY_ASYNC_READ,
4913 			 *   ZIO_PRIORITY_ASYNC_WRITE,
4914 			 *   ZIO_PRIORITY_SCRUB,
4915 			 *   ZIO_PRIORITY_TRIM,
4916 			 *   ZIO_PRIORITY_REBUILD.
4917 			 */
4918 			if (priority == ZIO_PRIORITY_INITIALIZING) {
4919 				ASSERT3U(type, ==, ZIO_TYPE_WRITE);
4920 				priority = ZIO_PRIORITY_ASYNC_WRITE;
4921 			} else if (priority == ZIO_PRIORITY_REMOVAL) {
4922 				priority = ((type == ZIO_TYPE_WRITE) ?
4923 				    ZIO_PRIORITY_ASYNC_WRITE :
4924 				    ZIO_PRIORITY_ASYNC_READ);
4925 			}
4926 
4927 			vs->vs_ops[vs_type]++;
4928 			vs->vs_bytes[vs_type] += psize;
4929 
4930 			if (flags & ZIO_FLAG_DELEGATED) {
4931 				vsx->vsx_agg_histo[priority]
4932 				    [RQ_HISTO(zio->io_size)]++;
4933 			} else {
4934 				vsx->vsx_ind_histo[priority]
4935 				    [RQ_HISTO(zio->io_size)]++;
4936 			}
4937 
4938 			if (zio->io_delta && zio->io_delay) {
4939 				vsx->vsx_queue_histo[priority]
4940 				    [L_HISTO(zio->io_delta - zio->io_delay)]++;
4941 				vsx->vsx_disk_histo[type]
4942 				    [L_HISTO(zio->io_delay)]++;
4943 				vsx->vsx_total_histo[type]
4944 				    [L_HISTO(zio->io_delta)]++;
4945 			}
4946 		}
4947 
4948 		mutex_exit(&vd->vdev_stat_lock);
4949 		return;
4950 	}
4951 
4952 	if (flags & ZIO_FLAG_SPECULATIVE)
4953 		return;
4954 
4955 	/*
4956 	 * If this is an I/O error that is going to be retried, then ignore the
4957 	 * error.  Otherwise, the user may interpret B_FAILFAST I/O errors as
4958 	 * hard errors, when in reality they can happen for any number of
4959 	 * innocuous reasons (bus resets, MPxIO link failure, etc).
4960 	 */
4961 	if (zio->io_error == EIO &&
4962 	    !(zio->io_flags & ZIO_FLAG_IO_RETRY))
4963 		return;
4964 
4965 	/*
4966 	 * Intent log writes won't propagate their error to the root
4967 	 * I/O so don't mark these types of failures as pool-level
4968 	 * errors.
4969 	 */
4970 	if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
4971 		return;
4972 
4973 	if (type == ZIO_TYPE_WRITE && txg != 0 &&
4974 	    (!(flags & ZIO_FLAG_IO_REPAIR) ||
4975 	    (flags & ZIO_FLAG_SCAN_THREAD) ||
4976 	    spa->spa_claiming)) {
4977 		/*
4978 		 * This is either a normal write (not a repair), or it's
4979 		 * a repair induced by the scrub thread, or it's a repair
4980 		 * made by zil_claim() during spa_load() in the first txg.
4981 		 * In the normal case, we commit the DTL change in the same
4982 		 * txg as the block was born.  In the scrub-induced repair
4983 		 * case, we know that scrubs run in first-pass syncing context,
4984 		 * so we commit the DTL change in spa_syncing_txg(spa).
4985 		 * In the zil_claim() case, we commit in spa_first_txg(spa).
4986 		 *
4987 		 * We currently do not make DTL entries for failed spontaneous
4988 		 * self-healing writes triggered by normal (non-scrubbing)
4989 		 * reads, because we have no transactional context in which to
4990 		 * do so -- and it's not clear that it'd be desirable anyway.
4991 		 */
4992 		if (vd->vdev_ops->vdev_op_leaf) {
4993 			uint64_t commit_txg = txg;
4994 			if (flags & ZIO_FLAG_SCAN_THREAD) {
4995 				ASSERT(flags & ZIO_FLAG_IO_REPAIR);
4996 				ASSERT(spa_sync_pass(spa) == 1);
4997 				vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
4998 				commit_txg = spa_syncing_txg(spa);
4999 			} else if (spa->spa_claiming) {
5000 				ASSERT(flags & ZIO_FLAG_IO_REPAIR);
5001 				commit_txg = spa_first_txg(spa);
5002 			}
5003 			ASSERT(commit_txg >= spa_syncing_txg(spa));
5004 			if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
5005 				return;
5006 			for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
5007 				vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
5008 			vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
5009 		}
5010 		if (vd != rvd)
5011 			vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
5012 	}
5013 }
5014 
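/*
 * Scale a raw space delta by this vdev's deflate ratio; used to keep the
 * deflated space accounting (vs_dspace) consistent in vdev_space_update().
 */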
5015 int64_t
5016 vdev_deflated_space(vdev_t *vd, int64_t space)
5017 {
5018 	ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0);
5019 	ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
5020 
5021 	return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio);
5022 }
5023 
5024 /*
5025  * Update the in-core space usage stats for this vdev, its metaslab class,
5026  * and the root vdev.
5027  */
5028 void
5029 vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
5030     int64_t space_delta)
5031 {
5032 	(void) defer_delta;
5033 	int64_t dspace_delta;
5034 	spa_t *spa = vd->vdev_spa;
5035 	vdev_t *rvd = spa->spa_root_vdev;
5036 
5037 	ASSERT(vd == vd->vdev_top);
5038 
5039 	/*
5040 	 * Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion
5041 	 * factor.  We must calculate this here and not at the root vdev
5042 	 * because the root vdev's psize-to-asize is simply the max of its
5043 	 * children's, thus not accurate enough for us.
5044 	 */
5045 	dspace_delta = vdev_deflated_space(vd, space_delta);
5046 
5047 	mutex_enter(&vd->vdev_stat_lock);
5048 	/* ensure we won't underflow */
5049 	if (alloc_delta < 0) {
5050 		ASSERT3U(vd->vdev_stat.vs_alloc, >=, -alloc_delta);
5051 	}
5052 
5053 	vd->vdev_stat.vs_alloc += alloc_delta;
5054 	vd->vdev_stat.vs_space += space_delta;
5055 	vd->vdev_stat.vs_dspace += dspace_delta;
5056 	mutex_exit(&vd->vdev_stat_lock);
5057 
5058 	/* every class but log contributes to root space stats */
5059 	if (vd->vdev_mg != NULL && !vd->vdev_islog) {
5060 		ASSERT(!vd->vdev_isl2cache);
5061 		mutex_enter(&rvd->vdev_stat_lock);
5062 		rvd->vdev_stat.vs_alloc += alloc_delta;
5063 		rvd->vdev_stat.vs_space += space_delta;
5064 		rvd->vdev_stat.vs_dspace += dspace_delta;
5065 		mutex_exit(&rvd->vdev_stat_lock);
5066 	}
5067 	/* Note: metaslab_class_space_update moved to metaslab_space_update */
5068 }
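
/*
 * Usage sketch (illustrative; tvd and asize are placeholders): metaslab code
 * accounting for a newly allocated block of asize bytes on top-level vdev tvd
 * would call something like
 *
 *	vdev_space_update(tvd, asize, 0, 0);
 *
 * and pass a negative alloc_delta when the block is later freed.
 */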
5069 
5070 /*
5071  * Mark a top-level vdev's config as dirty, placing it on the dirty list
5072  * so that it will be written out next time the vdev configuration is synced.
5073  * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
5074  */
5075 void
5076 vdev_config_dirty(vdev_t *vd)
5077 {
5078 	spa_t *spa = vd->vdev_spa;
5079 	vdev_t *rvd = spa->spa_root_vdev;
5080 	int c;
5081 
5082 	ASSERT(spa_writeable(spa));
5083 
5084 	/*
5085 	 * If this is an aux vdev (as with l2cache and spare devices), then we
5086 	 * update the vdev config manually and set the sync flag.
5087 	 */
5088 	if (vd->vdev_aux != NULL) {
5089 		spa_aux_vdev_t *sav = vd->vdev_aux;
5090 		nvlist_t **aux;
5091 		uint_t naux;
5092 
5093 		for (c = 0; c < sav->sav_count; c++) {
5094 			if (sav->sav_vdevs[c] == vd)
5095 				break;
5096 		}
5097 
5098 		if (c == sav->sav_count) {
5099 			/*
5100 			 * We're being removed.  There's nothing more to do.
5101 			 */
5102 			ASSERT(sav->sav_sync == B_TRUE);
5103 			return;
5104 		}
5105 
5106 		sav->sav_sync = B_TRUE;
5107 
5108 		if (nvlist_lookup_nvlist_array(sav->sav_config,
5109 		    ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
5110 			VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
5111 			    ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
5112 		}
5113 
5114 		ASSERT(c < naux);
5115 
5116 		/*
5117 		 * Setting the nvlist in the middle of the array is a little
5118 		 * sketchy, but it will work.
5119 		 */
5120 		nvlist_free(aux[c]);
5121 		aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
5122 
5123 		return;
5124 	}
5125 
5126 	/*
5127 	 * The dirty list is protected by the SCL_CONFIG lock.  The caller
5128 	 * must either hold SCL_CONFIG as writer, or must be the sync thread
5129 	 * (which holds SCL_CONFIG as reader).  There's only one sync thread,
5130 	 * so this is sufficient to ensure mutual exclusion.
5131 	 */
5132 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
5133 	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
5134 	    spa_config_held(spa, SCL_CONFIG, RW_READER)));
5135 
5136 	if (vd == rvd) {
5137 		for (c = 0; c < rvd->vdev_children; c++)
5138 			vdev_config_dirty(rvd->vdev_child[c]);
5139 	} else {
5140 		ASSERT(vd == vd->vdev_top);
5141 
5142 		if (!list_link_active(&vd->vdev_config_dirty_node) &&
5143 		    vdev_is_concrete(vd)) {
5144 			list_insert_head(&spa->spa_config_dirty_list, vd);
5145 		}
5146 	}
5147 }
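
/*
 * Usage sketch (illustrative; tvd, vdev_some_field and new_value are
 * placeholders): an administrative path changing a top-level vdev's
 * configuration outside of syncing context is expected to hold SCL_CONFIG
 * (or SCL_ALL) as writer around the update:
 *
 *	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_WRITER);
 *	tvd->vdev_some_field = new_value;
 *	vdev_config_dirty(tvd);
 *	spa_config_exit(spa, SCL_CONFIG, FTAG);
 */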
5148 
5149 void
5150 vdev_config_clean(vdev_t *vd)
5151 {
5152 	spa_t *spa = vd->vdev_spa;
5153 
5154 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
5155 	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
5156 	    spa_config_held(spa, SCL_CONFIG, RW_READER)));
5157 
5158 	ASSERT(list_link_active(&vd->vdev_config_dirty_node));
5159 	list_remove(&spa->spa_config_dirty_list, vd);
5160 }
5161 
5162 /*
5163  * Mark a top-level vdev's state as dirty, so that the next pass of
5164  * spa_sync() can convert this into vdev_config_dirty().  We distinguish
5165  * the state changes from larger config changes because they require
5166  * much less locking, and are often needed for administrative actions.
5167  */
5168 void
5169 vdev_state_dirty(vdev_t *vd)
5170 {
5171 	spa_t *spa = vd->vdev_spa;
5172 
5173 	ASSERT(spa_writeable(spa));
5174 	ASSERT(vd == vd->vdev_top);
5175 
5176 	/*
5177 	 * The state list is protected by the SCL_STATE lock.  The caller
5178 	 * must either hold SCL_STATE as writer, or must be the sync thread
5179 	 * (which holds SCL_STATE as reader).  There's only one sync thread,
5180 	 * so this is sufficient to ensure mutual exclusion.
5181 	 */
5182 	ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
5183 	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
5184 	    spa_config_held(spa, SCL_STATE, RW_READER)));
5185 
5186 	if (!list_link_active(&vd->vdev_state_dirty_node) &&
5187 	    vdev_is_concrete(vd))
5188 		list_insert_head(&spa->spa_state_dirty_list, vd);
5189 }
5190 
5191 void
5192 vdev_state_clean(vdev_t *vd)
5193 {
5194 	spa_t *spa = vd->vdev_spa;
5195 
5196 	ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
5197 	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
5198 	    spa_config_held(spa, SCL_STATE, RW_READER)));
5199 
5200 	ASSERT(list_link_active(&vd->vdev_state_dirty_node));
5201 	list_remove(&spa->spa_state_dirty_list, vd);
5202 }
5203 
5204 /*
5205  * Propagate vdev state up from children to parent.
5206  */
5207 void
5208 vdev_propagate_state(vdev_t *vd)
5209 {
5210 	spa_t *spa = vd->vdev_spa;
5211 	vdev_t *rvd = spa->spa_root_vdev;
5212 	int degraded = 0, faulted = 0;
5213 	int corrupted = 0;
5214 	vdev_t *child;
5215 
5216 	if (vd->vdev_children > 0) {
5217 		for (int c = 0; c < vd->vdev_children; c++) {
5218 			child = vd->vdev_child[c];
5219 
5220 			/*
5221 			 * Don't factor holes or indirect vdevs into the
5222 			 * decision.
5223 			 */
5224 			if (!vdev_is_concrete(child))
5225 				continue;
5226 
5227 			if (!vdev_readable(child) ||
5228 			    (!vdev_writeable(child) && spa_writeable(spa))) {
5229 				/*
5230 				 * Root special: if there is a top-level log
5231 				 * device, treat the root vdev as if it were
5232 				 * degraded.
5233 				 */
5234 				if (child->vdev_islog && vd == rvd)
5235 					degraded++;
5236 				else
5237 					faulted++;
5238 			} else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
5239 				degraded++;
5240 			}
5241 
5242 			if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
5243 				corrupted++;
5244 		}
5245 
5246 		vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
5247 
5248 		/*
5249 		 * Root special: if there is a top-level vdev that cannot be
5250 		 * opened due to corrupted metadata, then propagate the root
5251 		 * vdev's aux state as 'corrupt' rather than 'insufficient
5252 		 * replicas'.
5253 		 */
5254 		if (corrupted && vd == rvd &&
5255 		    rvd->vdev_state == VDEV_STATE_CANT_OPEN)
5256 			vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
5257 			    VDEV_AUX_CORRUPT_DATA);
5258 	}
5259 
5260 	if (vd->vdev_parent)
5261 		vdev_propagate_state(vd->vdev_parent);
5262 }
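
/*
 * Worked example (illustrative): for a 3-way mirror with one unreadable child
 * and one child in VDEV_STATE_DEGRADED, the loop above counts faulted == 1
 * and degraded == 1, and the mirror's vdev_op_state_change() callback would
 * typically leave the mirror readable but degraded.  The resulting state is
 * ultimately up to the vdev-type-specific callback.
 */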
5263 
5264 /*
5265  * Set a vdev's state.  If this is during an open, we don't update the parent
5266  * state, because we're in the process of opening children depth-first.
5267  * Otherwise, we propagate the change to the parent.
5268  *
5269  * If this routine places a device in a faulted state, an appropriate ereport is
5270  * generated.
5271  */
5272 void
5273 vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
5274 {
5275 	uint64_t save_state;
5276 	spa_t *spa = vd->vdev_spa;
5277 
5278 	if (state == vd->vdev_state) {
5279 		/*
5280 		 * Since the vdev_offline() code path can reach here with the
5281 		 * vdev already in the offline state, we could miss a state-change
5282 		 * event to OFFLINE.  Check the previous state to catch this condition.
5283 		 */
5284 		if (vd->vdev_ops->vdev_op_leaf &&
5285 		    (state == VDEV_STATE_OFFLINE) &&
5286 		    (vd->vdev_prevstate >= VDEV_STATE_FAULTED)) {
5287 			/* post an offline state change */
5288 			zfs_post_state_change(spa, vd, vd->vdev_prevstate);
5289 		}
5290 		vd->vdev_stat.vs_aux = aux;
5291 		return;
5292 	}
5293 
5294 	save_state = vd->vdev_state;
5295 
5296 	vd->vdev_state = state;
5297 	vd->vdev_stat.vs_aux = aux;
5298 
5299 	/*
5300 	 * If we are setting the vdev state to anything but an open state, then
5301 	 * always close the underlying device unless the device has requested
5302 	 * a delayed close (i.e. we're about to remove or fault the device).
5303 	 * Otherwise, we keep accessible but invalid devices open forever.
5304 	 * We don't call vdev_close() itself, because that implies some extra
5305 	 * checks (offline, etc) that we don't want here.  This is limited to
5306 	 * leaf devices, because otherwise closing the device will affect other
5307 	 * children.
5308 	 */
5309 	if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
5310 	    vd->vdev_ops->vdev_op_leaf)
5311 		vd->vdev_ops->vdev_op_close(vd);
5312 
5313 	if (vd->vdev_removed &&
5314 	    state == VDEV_STATE_CANT_OPEN &&
5315 	    (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
5316 		/*
5317 		 * If the previous state is set to VDEV_STATE_REMOVED, then this
5318 		 * device was previously marked removed and someone attempted to
5319 		 * reopen it.  If this failed due to a nonexistent device, then
5320 		 * keep the device in the REMOVED state.  We also allow this if
5321 		 * it is one of our special test online cases, which is only
5322 		 * attempting to online the device and shouldn't generate an FMA
5323 		 * fault.
5324 		 */
5325 		vd->vdev_state = VDEV_STATE_REMOVED;
5326 		vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
5327 	} else if (state == VDEV_STATE_REMOVED) {
5328 		vd->vdev_removed = B_TRUE;
5329 	} else if (state == VDEV_STATE_CANT_OPEN) {
5330 		/*
5331 		 * If we fail to open a vdev during an import or recovery, we
5332 		 * mark it as "not available", which signifies that it was
5333 		 * never there to begin with.  Failure to open such a device
5334 		 * is not considered an error.
5335 		 */
5336 		if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
5337 		    spa_load_state(spa) == SPA_LOAD_RECOVER) &&
5338 		    vd->vdev_ops->vdev_op_leaf)
5339 			vd->vdev_not_present = 1;
5340 
5341 		/*
5342 		 * Post the appropriate ereport.  If the 'prevstate' field is
5343 		 * set to something other than VDEV_STATE_UNKNOWN, it indicates
5344 		 * that this is part of a vdev_reopen().  In this case, we don't
5345 		 * want to post the ereport if the device was already in the
5346 		 * CANT_OPEN state beforehand.
5347 		 *
5348 		 * If the 'checkremove' flag is set, then this is an attempt to
5349 		 * online the device in response to an insertion event.  If we
5350 		 * hit this case, then we have detected an insertion event for a
5351 		 * faulted or offline device that wasn't in the removed state.
5352 		 * In this scenario, we don't post an ereport because we are
5353 		 * about to replace the device, or attempt an online with
5354 		 * vdev_forcefault, which will generate the fault for us.
5355 		 */
5356 		if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
5357 		    !vd->vdev_not_present && !vd->vdev_checkremove &&
5358 		    vd != spa->spa_root_vdev) {
5359 			const char *class;
5360 
5361 			switch (aux) {
5362 			case VDEV_AUX_OPEN_FAILED:
5363 				class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
5364 				break;
5365 			case VDEV_AUX_CORRUPT_DATA:
5366 				class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
5367 				break;
5368 			case VDEV_AUX_NO_REPLICAS:
5369 				class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
5370 				break;
5371 			case VDEV_AUX_BAD_GUID_SUM:
5372 				class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
5373 				break;
5374 			case VDEV_AUX_TOO_SMALL:
5375 				class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
5376 				break;
5377 			case VDEV_AUX_BAD_LABEL:
5378 				class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
5379 				break;
5380 			case VDEV_AUX_BAD_ASHIFT:
5381 				class = FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT;
5382 				break;
5383 			default:
5384 				class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
5385 			}
5386 
5387 			(void) zfs_ereport_post(class, spa, vd, NULL, NULL,
5388 			    save_state);
5389 		}
5390 
5391 		/* Erase any notion of persistent removed state */
5392 		vd->vdev_removed = B_FALSE;
5393 	} else {
5394 		vd->vdev_removed = B_FALSE;
5395 	}
5396 
5397 	/*
5398 	 * Notify ZED of any significant state-change on a leaf vdev.
5400 	 */
5401 	if (vd->vdev_ops->vdev_op_leaf) {
5402 		/* preserve original state from a vdev_reopen() */
5403 		if ((vd->vdev_prevstate != VDEV_STATE_UNKNOWN) &&
5404 		    (vd->vdev_prevstate != vd->vdev_state) &&
5405 		    (save_state <= VDEV_STATE_CLOSED))
5406 			save_state = vd->vdev_prevstate;
5407 
5408 		/* filter out state change due to initial vdev_open */
5409 		if (save_state > VDEV_STATE_CLOSED)
5410 			zfs_post_state_change(spa, vd, save_state);
5411 	}
5412 
5413 	if (!isopen && vd->vdev_parent)
5414 		vdev_propagate_state(vd->vdev_parent);
5415 }
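
/*
 * Usage sketch (illustrative): the fault paths in this file use this routine
 * roughly as follows, with VDEV_AUX_ERR_EXCEEDED shown only as one example
 * aux value:
 *
 *	vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, VDEV_AUX_ERR_EXCEEDED);
 *
 * Because isopen is B_FALSE, the new state is propagated to vd's parents.
 */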
5416 
5417 boolean_t
5418 vdev_children_are_offline(vdev_t *vd)
5419 {
5420 	ASSERT(!vd->vdev_ops->vdev_op_leaf);
5421 
5422 	for (uint64_t i = 0; i < vd->vdev_children; i++) {
5423 		if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE)
5424 			return (B_FALSE);
5425 	}
5426 
5427 	return (B_TRUE);
5428 }
5429 
5430 /*
5431  * Check the vdev configuration to ensure that it's capable of supporting
5432  * a root pool. We do not support partial configuration.
5433  */
5434 boolean_t
5435 vdev_is_bootable(vdev_t *vd)
5436 {
5437 	if (!vd->vdev_ops->vdev_op_leaf) {
5438 		const char *vdev_type = vd->vdev_ops->vdev_op_type;
5439 
5440 		if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0)
5441 			return (B_FALSE);
5442 	}
5443 
5444 	for (int c = 0; c < vd->vdev_children; c++) {
5445 		if (!vdev_is_bootable(vd->vdev_child[c]))
5446 			return (B_FALSE);
5447 	}
5448 	return (B_TRUE);
5449 }
5450 
5451 boolean_t
5452 vdev_is_concrete(vdev_t *vd)
5453 {
5454 	vdev_ops_t *ops = vd->vdev_ops;
5455 	if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops ||
5456 	    ops == &vdev_missing_ops || ops == &vdev_root_ops) {
5457 		return (B_FALSE);
5458 	} else {
5459 		return (B_TRUE);
5460 	}
5461 }
5462 
5463 /*
5464  * Determine if a log device has valid content.  If the vdev was
5465  * removed or faulted in the MOS config then we know that
5466  * the content on the log device has already been written to the pool.
5467  */
5468 boolean_t
5469 vdev_log_state_valid(vdev_t *vd)
5470 {
5471 	if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
5472 	    !vd->vdev_removed)
5473 		return (B_TRUE);
5474 
5475 	for (int c = 0; c < vd->vdev_children; c++)
5476 		if (vdev_log_state_valid(vd->vdev_child[c]))
5477 			return (B_TRUE);
5478 
5479 	return (B_FALSE);
5480 }
5481 
5482 /*
5483  * Expand a vdev if possible.
5484  */
5485 void
5486 vdev_expand(vdev_t *vd, uint64_t txg)
5487 {
5488 	ASSERT(vd->vdev_top == vd);
5489 	ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5490 	ASSERT(vdev_is_concrete(vd));
5491 
5492 	vdev_set_deflate_ratio(vd);
5493 
5494 	if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
5495 	    vdev_is_concrete(vd)) {
5496 		vdev_metaslab_group_create(vd);
5497 		VERIFY(vdev_metaslab_init(vd, txg) == 0);
5498 		vdev_config_dirty(vd);
5499 	}
5500 }
5501 
5502 /*
5503  * Split a vdev.
5504  */
5505 void
5506 vdev_split(vdev_t *vd)
5507 {
5508 	vdev_t *cvd, *pvd = vd->vdev_parent;
5509 
5510 	VERIFY3U(pvd->vdev_children, >, 1);
5511 
5512 	vdev_remove_child(pvd, vd);
5513 	vdev_compact_children(pvd);
5514 
5515 	ASSERT3P(pvd->vdev_child, !=, NULL);
5516 
5517 	cvd = pvd->vdev_child[0];
5518 	if (pvd->vdev_children == 1) {
5519 		vdev_remove_parent(cvd);
5520 		cvd->vdev_splitting = B_TRUE;
5521 	}
5522 	vdev_propagate_state(cvd);
5523 }
5524 
5525 void
5526 vdev_deadman(vdev_t *vd, const char *tag)
5527 {
5528 	for (int c = 0; c < vd->vdev_children; c++) {
5529 		vdev_t *cvd = vd->vdev_child[c];
5530 
5531 		vdev_deadman(cvd, tag);
5532 	}
5533 
5534 	if (vd->vdev_ops->vdev_op_leaf) {
5535 		vdev_queue_t *vq = &vd->vdev_queue;
5536 
5537 		mutex_enter(&vq->vq_lock);
5538 		if (vq->vq_active > 0) {
5539 			spa_t *spa = vd->vdev_spa;
5540 			zio_t *fio;
5541 			uint64_t delta;
5542 
5543 			zfs_dbgmsg("slow vdev: %s has %u active IOs",
5544 			    vd->vdev_path, vq->vq_active);
5545 
5546 			/*
5547 			 * Look at the I/O at the head of the active queue; if it
5548 			 * has been outstanding for longer than spa_deadman_synctime,
5549 			 * invoke the deadman logic.
5550 			 */
5551 			fio = list_head(&vq->vq_active_list);
5552 			delta = gethrtime() - fio->io_timestamp;
5553 			if (delta > spa_deadman_synctime(spa))
5554 				zio_deadman(fio, tag);
5555 		}
5556 		mutex_exit(&vq->vq_lock);
5557 	}
5558 }
5559 
5560 void
5561 vdev_defer_resilver(vdev_t *vd)
5562 {
5563 	ASSERT(vd->vdev_ops->vdev_op_leaf);
5564 
5565 	vd->vdev_resilver_deferred = B_TRUE;
5566 	vd->vdev_spa->spa_resilver_deferred = B_TRUE;
5567 }
5568 
5569 /*
5570  * Clears the resilver deferred flag on all leaf devs under vd. Returns
5571  * B_TRUE if we have devices that need to be resilvered and are available to
5572  * accept resilver I/Os.
5573  */
5574 boolean_t
5575 vdev_clear_resilver_deferred(vdev_t *vd, dmu_tx_t *tx)
5576 {
5577 	boolean_t resilver_needed = B_FALSE;
5578 	spa_t *spa = vd->vdev_spa;
5579 
5580 	for (int c = 0; c < vd->vdev_children; c++) {
5581 		vdev_t *cvd = vd->vdev_child[c];
5582 		resilver_needed |= vdev_clear_resilver_deferred(cvd, tx);
5583 	}
5584 
5585 	if (vd == spa->spa_root_vdev &&
5586 	    spa_feature_is_active(spa, SPA_FEATURE_RESILVER_DEFER)) {
5587 		spa_feature_decr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
5588 		vdev_config_dirty(vd);
5589 		spa->spa_resilver_deferred = B_FALSE;
5590 		return (resilver_needed);
5591 	}
5592 
5593 	if (!vdev_is_concrete(vd) || vd->vdev_aux ||
5594 	    !vd->vdev_ops->vdev_op_leaf)
5595 		return (resilver_needed);
5596 
5597 	vd->vdev_resilver_deferred = B_FALSE;
5598 
5599 	return (!vdev_is_dead(vd) && !vd->vdev_offline &&
5600 	    vdev_resilver_needed(vd, NULL, NULL));
5601 }
5602 
5603 boolean_t
5604 vdev_xlate_is_empty(range_seg64_t *rs)
5605 {
5606 	return (rs->rs_start == rs->rs_end);
5607 }
5608 
5609 /*
5610  * Translate a logical range to the first contiguous physical range for the
5611  * specified vdev_t.  This function is initially called with a leaf vdev and
5612  * will walk each parent vdev until it reaches a top-level vdev. Once the
5613  * top-level is reached the physical range is initialized and the recursive
5614  * function begins to unwind. As it unwinds it calls the parent's vdev
5615  * specific translation function to do the real conversion.
5616  */
5617 void
5618 vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
5619     range_seg64_t *physical_rs, range_seg64_t *remain_rs)
5620 {
5621 	/*
5622 	 * Walk up the vdev tree
5623 	 */
5624 	if (vd != vd->vdev_top) {
5625 		vdev_xlate(vd->vdev_parent, logical_rs, physical_rs,
5626 		    remain_rs);
5627 	} else {
5628 		/*
5629 		 * We've reached the top-level vdev, initialize the physical
5630 		 * range to the logical range and set an empty remaining
5631 		 * range then start to unwind.
5632 		 */
5633 		physical_rs->rs_start = logical_rs->rs_start;
5634 		physical_rs->rs_end = logical_rs->rs_end;
5635 
5636 		remain_rs->rs_start = logical_rs->rs_start;
5637 		remain_rs->rs_end = logical_rs->rs_start;
5638 
5639 		return;
5640 	}
5641 
5642 	vdev_t *pvd = vd->vdev_parent;
5643 	ASSERT3P(pvd, !=, NULL);
5644 	ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL);
5645 
5646 	/*
5647 	 * As this recursive function unwinds, translate the logical
5648 	 * range into its physical and any remaining components by calling
5649 	 * the vdev specific translate function.
5650 	 */
5651 	range_seg64_t intermediate = { 0 };
5652 	pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate, remain_rs);
5653 
5654 	physical_rs->rs_start = intermediate.rs_start;
5655 	physical_rs->rs_end = intermediate.rs_end;
5656 }
5657 
5658 void
5659 vdev_xlate_walk(vdev_t *vd, const range_seg64_t *logical_rs,
5660     vdev_xlate_func_t *func, void *arg)
5661 {
5662 	range_seg64_t iter_rs = *logical_rs;
5663 	range_seg64_t physical_rs;
5664 	range_seg64_t remain_rs;
5665 
5666 	while (!vdev_xlate_is_empty(&iter_rs)) {
5667 
5668 		vdev_xlate(vd, &iter_rs, &physical_rs, &remain_rs);
5669 
5670 		/*
5671 		 * With raidz and dRAID, it's possible that the logical range
5672 		 * does not live on this leaf vdev.  Only call the provided
5673 		 * function when the translated physical range is non-empty.
5674 		 */
5675 		if (!vdev_xlate_is_empty(&physical_rs))
5676 			func(arg, &physical_rs);
5677 
5678 		iter_rs = remain_rs;
5679 	}
5680 }
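
/*
 * Usage sketch (illustrative; example_xlate_cb, leaf_vd, start, size and arg
 * are placeholders): a caller that wants to visit every physical extent
 * backing a logical range on a leaf, much as the trim and initialize code do,
 * might supply a callback such as:
 *
 *	static void
 *	example_xlate_cb(void *arg, range_seg64_t *physical_rs)
 *	{
 *		// operate on [rs_start, rs_end) of the leaf device
 *	}
 *
 *	range_seg64_t lrs = { .rs_start = start, .rs_end = start + size };
 *	vdev_xlate_walk(leaf_vd, &lrs, example_xlate_cb, arg);
 */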
5681 
5682 static char *
5683 vdev_name(vdev_t *vd, char *buf, int buflen)
5684 {
5685 	if (vd->vdev_path == NULL) {
5686 		if (strcmp(vd->vdev_ops->vdev_op_type, "root") == 0) {
5687 			strlcpy(buf, vd->vdev_spa->spa_name, buflen);
5688 		} else if (!vd->vdev_ops->vdev_op_leaf) {
5689 			snprintf(buf, buflen, "%s-%llu",
5690 			    vd->vdev_ops->vdev_op_type,
5691 			    (u_longlong_t)vd->vdev_id);
5692 		}
5693 	} else {
5694 		strlcpy(buf, vd->vdev_path, buflen);
5695 	}
5696 	return (buf);
5697 }
5698 
5699 /*
5700  * Look at the vdev tree and determine whether any devices are currently being
5701  * replaced.
5702  */
5703 boolean_t
5704 vdev_replace_in_progress(vdev_t *vdev)
5705 {
5706 	ASSERT(spa_config_held(vdev->vdev_spa, SCL_ALL, RW_READER) != 0);
5707 
5708 	if (vdev->vdev_ops == &vdev_replacing_ops)
5709 		return (B_TRUE);
5710 
5711 	/*
5712 	 * A 'spare' vdev indicates that we have a replace in progress, unless
5713 	 * it has exactly two children, and the second, the hot spare, has
5714 	 * finished being resilvered.
5715 	 */
5716 	if (vdev->vdev_ops == &vdev_spare_ops && (vdev->vdev_children > 2 ||
5717 	    !vdev_dtl_empty(vdev->vdev_child[1], DTL_MISSING)))
5718 		return (B_TRUE);
5719 
5720 	for (int i = 0; i < vdev->vdev_children; i++) {
5721 		if (vdev_replace_in_progress(vdev->vdev_child[i]))
5722 			return (B_TRUE);
5723 	}
5724 
5725 	return (B_FALSE);
5726 }
5727 
5728 /*
5729  * Add a (source=src, propname=propval) list to an nvlist.
5730  */
5731 static void
5732 vdev_prop_add_list(nvlist_t *nvl, const char *propname, const char *strval,
5733     uint64_t intval, zprop_source_t src)
5734 {
5735 	nvlist_t *propval;
5736 
5737 	propval = fnvlist_alloc();
5738 	fnvlist_add_uint64(propval, ZPROP_SOURCE, src);
5739 
5740 	if (strval != NULL)
5741 		fnvlist_add_string(propval, ZPROP_VALUE, strval);
5742 	else
5743 		fnvlist_add_uint64(propval, ZPROP_VALUE, intval);
5744 
5745 	fnvlist_add_nvlist(nvl, propname, propval);
5746 	nvlist_free(propval);
5747 }
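
/*
 * Illustrative result (property name and value assumed): after
 * vdev_prop_add_list(nvl, "capacity", NULL, 42, ZPROP_SRC_NONE), nvl gains
 * an entry shaped roughly like
 *
 *	capacity = { source = ZPROP_SRC_NONE, value = 42 }
 *
 * i.e. a nested nvlist keyed by the property name, which is the layout the
 * property-retrieval consumers expect to unpack.
 */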
5748 
5749 static void
5750 vdev_props_set_sync(void *arg, dmu_tx_t *tx)
5751 {
5752 	vdev_t *vd;
5753 	nvlist_t *nvp = arg;
5754 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
5755 	objset_t *mos = spa->spa_meta_objset;
5756 	nvpair_t *elem = NULL;
5757 	uint64_t vdev_guid;
5758 	uint64_t objid;
5759 	nvlist_t *nvprops;
5760 
5761 	vdev_guid = fnvlist_lookup_uint64(nvp, ZPOOL_VDEV_PROPS_SET_VDEV);
5762 	nvprops = fnvlist_lookup_nvlist(nvp, ZPOOL_VDEV_PROPS_SET_PROPS);
5763 	vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE);
5764 
5765 	/* this vdev could get removed while waiting for this sync task */
5766 	if (vd == NULL)
5767 		return;
5768 
5769 	/*
5770 	 * Set vdev property values in the vdev props mos object.
5771 	 */
5772 	if (vd->vdev_root_zap != 0) {
5773 		objid = vd->vdev_root_zap;
5774 	} else if (vd->vdev_top_zap != 0) {
5775 		objid = vd->vdev_top_zap;
5776 	} else if (vd->vdev_leaf_zap != 0) {
5777 		objid = vd->vdev_leaf_zap;
5778 	} else {
5779 		panic("unexpected vdev type");
5780 	}
5781 
5782 	mutex_enter(&spa->spa_props_lock);
5783 
5784 	while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
5785 		uint64_t intval;
5786 		const char *strval;
5787 		vdev_prop_t prop;
5788 		const char *propname = nvpair_name(elem);
5789 		zprop_type_t proptype;
5790 
5791 		switch (prop = vdev_name_to_prop(propname)) {
5792 		case VDEV_PROP_USERPROP:
5793 			if (vdev_prop_user(propname)) {
5794 				strval = fnvpair_value_string(elem);
5795 				if (strlen(strval) == 0) {
5796 					/* remove the property if value == "" */
5797 					(void) zap_remove(mos, objid, propname,
5798 					    tx);
5799 				} else {
5800 					VERIFY0(zap_update(mos, objid, propname,
5801 					    1, strlen(strval) + 1, strval, tx));
5802 				}
5803 				spa_history_log_internal(spa, "vdev set", tx,
5804 				    "vdev_guid=%llu: %s=%s",
5805 				    (u_longlong_t)vdev_guid, nvpair_name(elem),
5806 				    strval);
5807 			}
5808 			break;
5809 		default:
5810 			/* normalize the property name */
5811 			propname = vdev_prop_to_name(prop);
5812 			proptype = vdev_prop_get_type(prop);
5813 
5814 			if (nvpair_type(elem) == DATA_TYPE_STRING) {
5815 				ASSERT(proptype == PROP_TYPE_STRING);
5816 				strval = fnvpair_value_string(elem);
5817 				VERIFY0(zap_update(mos, objid, propname,
5818 				    1, strlen(strval) + 1, strval, tx));
5819 				spa_history_log_internal(spa, "vdev set", tx,
5820 				    "vdev_guid=%llu: %s=%s",
5821 				    (u_longlong_t)vdev_guid, nvpair_name(elem),
5822 				    strval);
5823 			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
5824 				intval = fnvpair_value_uint64(elem);
5825 
5826 				if (proptype == PROP_TYPE_INDEX) {
5827 					const char *unused;
5828 					VERIFY0(vdev_prop_index_to_string(
5829 					    prop, intval, &unused));
5830 				}
5831 				VERIFY0(zap_update(mos, objid, propname,
5832 				    sizeof (uint64_t), 1, &intval, tx));
5833 				spa_history_log_internal(spa, "vdev set", tx,
5834 				    "vdev_guid=%llu: %s=%lld",
5835 				    (u_longlong_t)vdev_guid,
5836 				    nvpair_name(elem), (longlong_t)intval);
5837 			} else {
5838 				panic("invalid vdev property type %u",
5839 				    nvpair_type(elem));
5840 			}
5841 		}
5842 
5843 	}
5844 
5845 	mutex_exit(&spa->spa_props_lock);
5846 }
5847 
5848 int
5849 vdev_prop_set(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
5850 {
5851 	spa_t *spa = vd->vdev_spa;
5852 	nvpair_t *elem = NULL;
5853 	uint64_t vdev_guid;
5854 	nvlist_t *nvprops;
5855 	int error = 0;
5856 
5857 	ASSERT(vd != NULL);
5858 
5859 	/* Check that vdev has a zap we can use */
5860 	if (vd->vdev_root_zap == 0 &&
5861 	    vd->vdev_top_zap == 0 &&
5862 	    vd->vdev_leaf_zap == 0)
5863 		return (SET_ERROR(EINVAL));
5864 
5865 	if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_SET_VDEV,
5866 	    &vdev_guid) != 0)
5867 		return (SET_ERROR(EINVAL));
5868 
5869 	if (nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_SET_PROPS,
5870 	    &nvprops) != 0)
5871 		return (SET_ERROR(EINVAL));
5872 
5873 	if ((vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE)) == NULL)
5874 		return (SET_ERROR(EINVAL));
5875 
5876 	while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
5877 		const char *propname = nvpair_name(elem);
5878 		vdev_prop_t prop = vdev_name_to_prop(propname);
5879 		uint64_t intval = 0;
5880 		const char *strval = NULL;
5881 
5882 		if (prop == VDEV_PROP_USERPROP && !vdev_prop_user(propname)) {
5883 			error = EINVAL;
5884 			goto end;
5885 		}
5886 
5887 		if (vdev_prop_readonly(prop)) {
5888 			error = EROFS;
5889 			goto end;
5890 		}
5891 
5892 		/* Special Processing */
5893 		switch (prop) {
5894 		case VDEV_PROP_PATH:
5895 			if (vd->vdev_path == NULL) {
5896 				error = EROFS;
5897 				break;
5898 			}
5899 			if (nvpair_value_string(elem, &strval) != 0) {
5900 				error = EINVAL;
5901 				break;
5902 			}
5903 			/* New path must start with /dev/ */
5904 			if (strncmp(strval, "/dev/", 5)) {
5905 				error = EINVAL;
5906 				break;
5907 			}
5908 			error = spa_vdev_setpath(spa, vdev_guid, strval);
5909 			break;
5910 		case VDEV_PROP_ALLOCATING:
5911 			if (nvpair_value_uint64(elem, &intval) != 0) {
5912 				error = EINVAL;
5913 				break;
5914 			}
5915 			if (intval != vd->vdev_noalloc)
5916 				break;
5917 			if (intval == 0)
5918 				error = spa_vdev_noalloc(spa, vdev_guid);
5919 			else
5920 				error = spa_vdev_alloc(spa, vdev_guid);
5921 			break;
5922 		case VDEV_PROP_FAILFAST:
5923 			if (nvpair_value_uint64(elem, &intval) != 0) {
5924 				error = EINVAL;
5925 				break;
5926 			}
5927 			vd->vdev_failfast = intval & 1;
5928 			break;
5929 		case VDEV_PROP_CHECKSUM_N:
5930 			if (nvpair_value_uint64(elem, &intval) != 0) {
5931 				error = EINVAL;
5932 				break;
5933 			}
5934 			vd->vdev_checksum_n = intval;
5935 			break;
5936 		case VDEV_PROP_CHECKSUM_T:
5937 			if (nvpair_value_uint64(elem, &intval) != 0) {
5938 				error = EINVAL;
5939 				break;
5940 			}
5941 			vd->vdev_checksum_t = intval;
5942 			break;
5943 		case VDEV_PROP_IO_N:
5944 			if (nvpair_value_uint64(elem, &intval) != 0) {
5945 				error = EINVAL;
5946 				break;
5947 			}
5948 			vd->vdev_io_n = intval;
5949 			break;
5950 		case VDEV_PROP_IO_T:
5951 			if (nvpair_value_uint64(elem, &intval) != 0) {
5952 				error = EINVAL;
5953 				break;
5954 			}
5955 			vd->vdev_io_t = intval;
5956 			break;
5957 		case VDEV_PROP_SLOW_IO_N:
5958 			if (nvpair_value_uint64(elem, &intval) != 0) {
5959 				error = EINVAL;
5960 				break;
5961 			}
5962 			vd->vdev_slow_io_n = intval;
5963 			break;
5964 		case VDEV_PROP_SLOW_IO_T:
5965 			if (nvpair_value_uint64(elem, &intval) != 0) {
5966 				error = EINVAL;
5967 				break;
5968 			}
5969 			vd->vdev_slow_io_t = intval;
5970 			break;
5971 		default:
5972 			/* Most processing is done in vdev_props_set_sync */
5973 			break;
5974 		}
5975 end:
5976 		if (error != 0) {
5977 			intval = error;
5978 			vdev_prop_add_list(outnvl, propname, strval, intval, 0);
5979 			return (error);
5980 		}
5981 	}
5982 
5983 	return (dsl_sync_task(spa->spa_name, NULL, vdev_props_set_sync,
5984 	    innvl, 6, ZFS_SPACE_CHECK_EXTRA_RESERVED));
5985 }
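
/*
 * Usage sketch (illustrative; the property name, value and vdev_guid are
 * placeholders): the ioctl path hands this function an innvl that could be
 * built roughly like this:
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	fnvlist_add_uint64(props, "failfast", 1);
 *	nvlist_t *innvl = fnvlist_alloc();
 *	fnvlist_add_uint64(innvl, ZPOOL_VDEV_PROPS_SET_VDEV, vdev_guid);
 *	fnvlist_add_nvlist(innvl, ZPOOL_VDEV_PROPS_SET_PROPS, props);
 *
 * The actual persistence then happens in vdev_props_set_sync() above.
 */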
5986 
5987 int
5988 vdev_prop_get(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
5989 {
5990 	spa_t *spa = vd->vdev_spa;
5991 	objset_t *mos = spa->spa_meta_objset;
5992 	int err = 0;
5993 	uint64_t objid;
5994 	uint64_t vdev_guid;
5995 	nvpair_t *elem = NULL;
5996 	nvlist_t *nvprops = NULL;
5997 	uint64_t intval = 0;
5998 	char *strval = NULL;
5999 	const char *propname = NULL;
6000 	vdev_prop_t prop;
6001 
6002 	ASSERT(vd != NULL);
6003 	ASSERT(mos != NULL);
6004 
6005 	if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_GET_VDEV,
6006 	    &vdev_guid) != 0)
6007 		return (SET_ERROR(EINVAL));
6008 
6009 	nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_GET_PROPS, &nvprops);
6010 
6011 	if (vd->vdev_root_zap != 0) {
6012 		objid = vd->vdev_root_zap;
6013 	} else if (vd->vdev_top_zap != 0) {
6014 		objid = vd->vdev_top_zap;
6015 	} else if (vd->vdev_leaf_zap != 0) {
6016 		objid = vd->vdev_leaf_zap;
6017 	} else {
6018 		return (SET_ERROR(EINVAL));
6019 	}
6020 	ASSERT(objid != 0);
6021 
6022 	mutex_enter(&spa->spa_props_lock);
6023 
6024 	if (nvprops != NULL) {
6025 		char namebuf[64] = { 0 };
6026 
6027 		while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
6028 			intval = 0;
6029 			strval = NULL;
6030 			propname = nvpair_name(elem);
6031 			prop = vdev_name_to_prop(propname);
6032 			zprop_source_t src = ZPROP_SRC_DEFAULT;
6033 			uint64_t integer_size, num_integers;
6034 
6035 			switch (prop) {
6036 			/* Special Read-only Properties */
6037 			case VDEV_PROP_NAME:
6038 				strval = vdev_name(vd, namebuf,
6039 				    sizeof (namebuf));
6040 				if (strval == NULL)
6041 					continue;
6042 				vdev_prop_add_list(outnvl, propname, strval, 0,
6043 				    ZPROP_SRC_NONE);
6044 				continue;
6045 			case VDEV_PROP_CAPACITY:
6046 				/* percent used */
6047 				intval = (vd->vdev_stat.vs_dspace == 0) ? 0 :
6048 				    (vd->vdev_stat.vs_alloc * 100 /
6049 				    vd->vdev_stat.vs_dspace);
6050 				vdev_prop_add_list(outnvl, propname, NULL,
6051 				    intval, ZPROP_SRC_NONE);
6052 				continue;
6053 			case VDEV_PROP_STATE:
6054 				vdev_prop_add_list(outnvl, propname, NULL,
6055 				    vd->vdev_state, ZPROP_SRC_NONE);
6056 				continue;
6057 			case VDEV_PROP_GUID:
6058 				vdev_prop_add_list(outnvl, propname, NULL,
6059 				    vd->vdev_guid, ZPROP_SRC_NONE);
6060 				continue;
6061 			case VDEV_PROP_ASIZE:
6062 				vdev_prop_add_list(outnvl, propname, NULL,
6063 				    vd->vdev_asize, ZPROP_SRC_NONE);
6064 				continue;
6065 			case VDEV_PROP_PSIZE:
6066 				vdev_prop_add_list(outnvl, propname, NULL,
6067 				    vd->vdev_psize, ZPROP_SRC_NONE);
6068 				continue;
6069 			case VDEV_PROP_ASHIFT:
6070 				vdev_prop_add_list(outnvl, propname, NULL,
6071 				    vd->vdev_ashift, ZPROP_SRC_NONE);
6072 				continue;
6073 			case VDEV_PROP_SIZE:
6074 				vdev_prop_add_list(outnvl, propname, NULL,
6075 				    vd->vdev_stat.vs_dspace, ZPROP_SRC_NONE);
6076 				continue;
6077 			case VDEV_PROP_FREE:
6078 				vdev_prop_add_list(outnvl, propname, NULL,
6079 				    vd->vdev_stat.vs_dspace -
6080 				    vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE);
6081 				continue;
6082 			case VDEV_PROP_ALLOCATED:
6083 				vdev_prop_add_list(outnvl, propname, NULL,
6084 				    vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE);
6085 				continue;
6086 			case VDEV_PROP_EXPANDSZ:
6087 				vdev_prop_add_list(outnvl, propname, NULL,
6088 				    vd->vdev_stat.vs_esize, ZPROP_SRC_NONE);
6089 				continue;
6090 			case VDEV_PROP_FRAGMENTATION:
6091 				vdev_prop_add_list(outnvl, propname, NULL,
6092 				    vd->vdev_stat.vs_fragmentation,
6093 				    ZPROP_SRC_NONE);
6094 				continue;
6095 			case VDEV_PROP_PARITY:
6096 				vdev_prop_add_list(outnvl, propname, NULL,
6097 				    vdev_get_nparity(vd), ZPROP_SRC_NONE);
6098 				continue;
6099 			case VDEV_PROP_PATH:
6100 				if (vd->vdev_path == NULL)
6101 					continue;
6102 				vdev_prop_add_list(outnvl, propname,
6103 				    vd->vdev_path, 0, ZPROP_SRC_NONE);
6104 				continue;
6105 			case VDEV_PROP_DEVID:
6106 				if (vd->vdev_devid == NULL)
6107 					continue;
6108 				vdev_prop_add_list(outnvl, propname,
6109 				    vd->vdev_devid, 0, ZPROP_SRC_NONE);
6110 				continue;
6111 			case VDEV_PROP_PHYS_PATH:
6112 				if (vd->vdev_physpath == NULL)
6113 					continue;
6114 				vdev_prop_add_list(outnvl, propname,
6115 				    vd->vdev_physpath, 0, ZPROP_SRC_NONE);
6116 				continue;
6117 			case VDEV_PROP_ENC_PATH:
6118 				if (vd->vdev_enc_sysfs_path == NULL)
6119 					continue;
6120 				vdev_prop_add_list(outnvl, propname,
6121 				    vd->vdev_enc_sysfs_path, 0, ZPROP_SRC_NONE);
6122 				continue;
6123 			case VDEV_PROP_FRU:
6124 				if (vd->vdev_fru == NULL)
6125 					continue;
6126 				vdev_prop_add_list(outnvl, propname,
6127 				    vd->vdev_fru, 0, ZPROP_SRC_NONE);
6128 				continue;
6129 			case VDEV_PROP_PARENT:
6130 				if (vd->vdev_parent != NULL) {
6131 					strval = vdev_name(vd->vdev_parent,
6132 					    namebuf, sizeof (namebuf));
6133 					vdev_prop_add_list(outnvl, propname,
6134 					    strval, 0, ZPROP_SRC_NONE);
6135 				}
6136 				continue;
6137 			case VDEV_PROP_CHILDREN:
6138 				if (vd->vdev_children > 0)
6139 					strval = kmem_zalloc(ZAP_MAXVALUELEN,
6140 					    KM_SLEEP);
6141 				for (uint64_t i = 0; i < vd->vdev_children;
6142 				    i++) {
6143 					const char *vname;
6144 
6145 					vname = vdev_name(vd->vdev_child[i],
6146 					    namebuf, sizeof (namebuf));
6147 					if (vname == NULL)
6148 						vname = "(unknown)";
6149 					if (strlen(strval) > 0)
6150 						strlcat(strval, ",",
6151 						    ZAP_MAXVALUELEN);
6152 					strlcat(strval, vname, ZAP_MAXVALUELEN);
6153 				}
6154 				if (strval != NULL) {
6155 					vdev_prop_add_list(outnvl, propname,
6156 					    strval, 0, ZPROP_SRC_NONE);
6157 					kmem_free(strval, ZAP_MAXVALUELEN);
6158 				}
6159 				continue;
6160 			case VDEV_PROP_NUMCHILDREN:
6161 				vdev_prop_add_list(outnvl, propname, NULL,
6162 				    vd->vdev_children, ZPROP_SRC_NONE);
6163 				continue;
6164 			case VDEV_PROP_READ_ERRORS:
6165 				vdev_prop_add_list(outnvl, propname, NULL,
6166 				    vd->vdev_stat.vs_read_errors,
6167 				    ZPROP_SRC_NONE);
6168 				continue;
6169 			case VDEV_PROP_WRITE_ERRORS:
6170 				vdev_prop_add_list(outnvl, propname, NULL,
6171 				    vd->vdev_stat.vs_write_errors,
6172 				    ZPROP_SRC_NONE);
6173 				continue;
6174 			case VDEV_PROP_CHECKSUM_ERRORS:
6175 				vdev_prop_add_list(outnvl, propname, NULL,
6176 				    vd->vdev_stat.vs_checksum_errors,
6177 				    ZPROP_SRC_NONE);
6178 				continue;
6179 			case VDEV_PROP_INITIALIZE_ERRORS:
6180 				vdev_prop_add_list(outnvl, propname, NULL,
6181 				    vd->vdev_stat.vs_initialize_errors,
6182 				    ZPROP_SRC_NONE);
6183 				continue;
6184 			case VDEV_PROP_OPS_NULL:
6185 				vdev_prop_add_list(outnvl, propname, NULL,
6186 				    vd->vdev_stat.vs_ops[ZIO_TYPE_NULL],
6187 				    ZPROP_SRC_NONE);
6188 				continue;
6189 			case VDEV_PROP_OPS_READ:
6190 				vdev_prop_add_list(outnvl, propname, NULL,
6191 				    vd->vdev_stat.vs_ops[ZIO_TYPE_READ],
6192 				    ZPROP_SRC_NONE);
6193 				continue;
6194 			case VDEV_PROP_OPS_WRITE:
6195 				vdev_prop_add_list(outnvl, propname, NULL,
6196 				    vd->vdev_stat.vs_ops[ZIO_TYPE_WRITE],
6197 				    ZPROP_SRC_NONE);
6198 				continue;
6199 			case VDEV_PROP_OPS_FREE:
6200 				vdev_prop_add_list(outnvl, propname, NULL,
6201 				    vd->vdev_stat.vs_ops[ZIO_TYPE_FREE],
6202 				    ZPROP_SRC_NONE);
6203 				continue;
6204 			case VDEV_PROP_OPS_CLAIM:
6205 				vdev_prop_add_list(outnvl, propname, NULL,
6206 				    vd->vdev_stat.vs_ops[ZIO_TYPE_CLAIM],
6207 				    ZPROP_SRC_NONE);
6208 				continue;
6209 			case VDEV_PROP_OPS_TRIM:
6210 				/*
6211 				 * TRIM ops and bytes are reported to user
6212 				 * space as ZIO_TYPE_IOCTL.  This is done to
6213 				 * preserve the vdev_stat_t structure layout
6214 				 * for user space.
6215 				 */
6216 				vdev_prop_add_list(outnvl, propname, NULL,
6217 				    vd->vdev_stat.vs_ops[ZIO_TYPE_IOCTL],
6218 				    ZPROP_SRC_NONE);
6219 				continue;
6220 			case VDEV_PROP_BYTES_NULL:
6221 				vdev_prop_add_list(outnvl, propname, NULL,
6222 				    vd->vdev_stat.vs_bytes[ZIO_TYPE_NULL],
6223 				    ZPROP_SRC_NONE);
6224 				continue;
6225 			case VDEV_PROP_BYTES_READ:
6226 				vdev_prop_add_list(outnvl, propname, NULL,
6227 				    vd->vdev_stat.vs_bytes[ZIO_TYPE_READ],
6228 				    ZPROP_SRC_NONE);
6229 				continue;
6230 			case VDEV_PROP_BYTES_WRITE:
6231 				vdev_prop_add_list(outnvl, propname, NULL,
6232 				    vd->vdev_stat.vs_bytes[ZIO_TYPE_WRITE],
6233 				    ZPROP_SRC_NONE);
6234 				continue;
6235 			case VDEV_PROP_BYTES_FREE:
6236 				vdev_prop_add_list(outnvl, propname, NULL,
6237 				    vd->vdev_stat.vs_bytes[ZIO_TYPE_FREE],
6238 				    ZPROP_SRC_NONE);
6239 				continue;
6240 			case VDEV_PROP_BYTES_CLAIM:
6241 				vdev_prop_add_list(outnvl, propname, NULL,
6242 				    vd->vdev_stat.vs_bytes[ZIO_TYPE_CLAIM],
6243 				    ZPROP_SRC_NONE);
6244 				continue;
6245 			case VDEV_PROP_BYTES_TRIM:
6246 				/*
6247 				 * TRIM ops and bytes are reported to user
6248 				 * space as ZIO_TYPE_IOCTL.  This is done to
6249 				 * preserve the vdev_stat_t structure layout
6250 				 * for user space.
6251 				 */
6252 				vdev_prop_add_list(outnvl, propname, NULL,
6253 				    vd->vdev_stat.vs_bytes[ZIO_TYPE_IOCTL],
6254 				    ZPROP_SRC_NONE);
6255 				continue;
6256 			case VDEV_PROP_REMOVING:
6257 				vdev_prop_add_list(outnvl, propname, NULL,
6258 				    vd->vdev_removing, ZPROP_SRC_NONE);
6259 				continue;
6260 			/* Numeric Properties */
6261 			case VDEV_PROP_ALLOCATING:
6262 				/* Leaf vdevs cannot have this property */
6263 				if (vd->vdev_mg == NULL &&
6264 				    vd->vdev_top != NULL) {
6265 					src = ZPROP_SRC_NONE;
6266 					intval = ZPROP_BOOLEAN_NA;
6267 				} else {
6268 					err = vdev_prop_get_int(vd, prop,
6269 					    &intval);
6270 					if (err && err != ENOENT)
6271 						break;
6272 
6273 					if (intval ==
6274 					    vdev_prop_default_numeric(prop))
6275 						src = ZPROP_SRC_DEFAULT;
6276 					else
6277 						src = ZPROP_SRC_LOCAL;
6278 				}
6279 
6280 				vdev_prop_add_list(outnvl, propname, NULL,
6281 				    intval, src);
6282 				break;
6283 			case VDEV_PROP_FAILFAST:
6284 				src = ZPROP_SRC_LOCAL;
6285 				strval = NULL;
6286 
6287 				err = zap_lookup(mos, objid, nvpair_name(elem),
6288 				    sizeof (uint64_t), 1, &intval);
6289 				if (err == ENOENT) {
6290 					intval = vdev_prop_default_numeric(
6291 					    prop);
6292 					err = 0;
6293 				} else if (err) {
6294 					break;
6295 				}
6296 				if (intval == vdev_prop_default_numeric(prop))
6297 					src = ZPROP_SRC_DEFAULT;
6298 
6299 				vdev_prop_add_list(outnvl, propname, strval,
6300 				    intval, src);
6301 				break;
6302 			case VDEV_PROP_CHECKSUM_N:
6303 			case VDEV_PROP_CHECKSUM_T:
6304 			case VDEV_PROP_IO_N:
6305 			case VDEV_PROP_IO_T:
6306 			case VDEV_PROP_SLOW_IO_N:
6307 			case VDEV_PROP_SLOW_IO_T:
6308 				err = vdev_prop_get_int(vd, prop, &intval);
6309 				if (err && err != ENOENT)
6310 					break;
6311 
6312 				if (intval == vdev_prop_default_numeric(prop))
6313 					src = ZPROP_SRC_DEFAULT;
6314 				else
6315 					src = ZPROP_SRC_LOCAL;
6316 
6317 				vdev_prop_add_list(outnvl, propname, NULL,
6318 				    intval, src);
6319 				break;
6320 			/* Text Properties */
6321 			case VDEV_PROP_COMMENT:
6322 				/* Exists in the ZAP below */
6323 				/* FALLTHRU */
6324 			case VDEV_PROP_USERPROP:
6325 				/* User Properties */
6326 				src = ZPROP_SRC_LOCAL;
6327 
6328 				err = zap_length(mos, objid, nvpair_name(elem),
6329 				    &integer_size, &num_integers);
6330 				if (err)
6331 					break;
6332 
6333 				switch (integer_size) {
6334 				case 8:
6335 					/* User properties cannot be integers */
6336 					err = EINVAL;
6337 					break;
6338 				case 1:
6339 					/* string property */
6340 					strval = kmem_alloc(num_integers,
6341 					    KM_SLEEP);
6342 					err = zap_lookup(mos, objid,
6343 					    nvpair_name(elem), 1,
6344 					    num_integers, strval);
6345 					if (err) {
6346 						kmem_free(strval,
6347 						    num_integers);
6348 						break;
6349 					}
6350 					vdev_prop_add_list(outnvl, propname,
6351 					    strval, 0, src);
6352 					kmem_free(strval, num_integers);
6353 					break;
6354 				}
6355 				break;
6356 			default:
6357 				err = ENOENT;
6358 				break;
6359 			}
6360 			if (err)
6361 				break;
6362 		}
6363 	} else {
6364 		/*
6365 		 * Get all properties from the MOS vdev property object.
6366 		 */
6367 		zap_cursor_t zc;
6368 		zap_attribute_t za;
6369 		for (zap_cursor_init(&zc, mos, objid);
6370 		    (err = zap_cursor_retrieve(&zc, &za)) == 0;
6371 		    zap_cursor_advance(&zc)) {
6372 			intval = 0;
6373 			strval = NULL;
6374 			zprop_source_t src = ZPROP_SRC_DEFAULT;
6375 			propname = za.za_name;
6376 
6377 			switch (za.za_integer_length) {
6378 			case 8:
6379 				/* We do not allow integer user properties */
6380 				/* This is likely an internal value */
6381 				break;
6382 			case 1:
6383 				/* string property */
6384 				strval = kmem_alloc(za.za_num_integers,
6385 				    KM_SLEEP);
6386 				err = zap_lookup(mos, objid, za.za_name, 1,
6387 				    za.za_num_integers, strval);
6388 				if (err) {
6389 					kmem_free(strval, za.za_num_integers);
6390 					break;
6391 				}
6392 				vdev_prop_add_list(outnvl, propname, strval, 0,
6393 				    src);
6394 				kmem_free(strval, za.za_num_integers);
6395 				break;
6396 
6397 			default:
6398 				break;
6399 			}
6400 		}
6401 		zap_cursor_fini(&zc);
6402 	}
6403 
6404 	mutex_exit(&spa->spa_props_lock);
6405 	if (err && err != ENOENT) {
6406 		return (err);
6407 	}
6408 
6409 	return (0);
6410 }
6411 
6412 EXPORT_SYMBOL(vdev_fault);
6413 EXPORT_SYMBOL(vdev_degrade);
6414 EXPORT_SYMBOL(vdev_online);
6415 EXPORT_SYMBOL(vdev_offline);
6416 EXPORT_SYMBOL(vdev_clear);
6417 
6418 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_count, UINT, ZMOD_RW,
6419 	"Target number of metaslabs per top-level vdev");
6420 
6421 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_shift, UINT, ZMOD_RW,
6422 	"Default lower limit for metaslab size");
6423 
6424 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, max_ms_shift, UINT, ZMOD_RW,
6425 	"Default upper limit for metaslab size");
6426 
6427 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, min_ms_count, UINT, ZMOD_RW,
6428 	"Minimum number of metaslabs per top-level vdev");
6429 
6430 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, ms_count_limit, UINT, ZMOD_RW,
6431 	"Practical upper limit of total metaslabs per top-level vdev");
6432 
6433 ZFS_MODULE_PARAM(zfs, zfs_, slow_io_events_per_second, UINT, ZMOD_RW,
6434 	"Rate limit slow IO (delay) events to this many per second");
6435 
6436 /* BEGIN CSTYLED */
6437 ZFS_MODULE_PARAM(zfs, zfs_, checksum_events_per_second, UINT, ZMOD_RW,
6438 	"Rate limit checksum events to this many checksum errors per second "
6439 	"(do not set below ZED threshold).");
6440 /* END CSTYLED */
6441 
6442 ZFS_MODULE_PARAM(zfs, zfs_, scan_ignore_errors, INT, ZMOD_RW,
6443 	"Ignore errors during resilver/scrub");
6444 
6445 ZFS_MODULE_PARAM(zfs_vdev, vdev_, validate_skip, INT, ZMOD_RW,
6446 	"Bypass vdev_validate()");
6447 
6448 ZFS_MODULE_PARAM(zfs, zfs_, nocacheflush, INT, ZMOD_RW,
6449 	"Disable cache flushes");
6450 
6451 ZFS_MODULE_PARAM(zfs, zfs_, embedded_slog_min_ms, UINT, ZMOD_RW,
6452 	"Minimum number of metaslabs required to dedicate one for log blocks");
6453 
6454 /* BEGIN CSTYLED */
6455 ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, min_auto_ashift,
6456 	param_set_min_auto_ashift, param_get_uint, ZMOD_RW,
6457 	"Minimum ashift used when creating new top-level vdevs");
6458 
6459 ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, max_auto_ashift,
6460 	param_set_max_auto_ashift, param_get_uint, ZMOD_RW,
6461 	"Maximum ashift used when optimizing for logical -> physical sector "
6462 	"size on new top-level vdevs");
6463 /* END CSTYLED */
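
/*
 * Tuning sketch (illustrative, Linux-specific; the value 512 is only an
 * example): the parameters declared above surface under
 * /sys/module/zfs/parameters/, so an administrator could inspect or adjust
 * the metaslab target at runtime with:
 *
 *	cat /sys/module/zfs/parameters/zfs_vdev_default_ms_count
 *	echo 512 > /sys/module/zfs/parameters/zfs_vdev_default_ms_count
 *
 * Such a change typically only affects top-level vdevs created afterwards.
 */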
6464