/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright 2016 Nexenta Systems, Inc.
 * Copyright 2016 Igor Kozhukhov <[email protected]>
 * Copyright (c) 2017 Datto Inc.
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static int read_efi_label(nvlist_t *, diskaddr_t *, boolean_t *);
static boolean_t zpool_vdev_is_interior(const char *name);

#define	BACKUP_SLICE	"s2"

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 *	zpool property functions
 * ====================================================================
 */
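/*
 * Retrieve all properties for the given pool by issuing
 * ZFS_IOC_POOL_GET_PROPS, growing the destination nvlist buffer and
 * retrying while the kernel reports ENOMEM. On success the result is
 * cached in zhp->zpool_props.
 */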
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

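/*
 * Re-read the property list for the pool. The old list is freed only
 * after a successful fetch, so a failed refresh leaves any previously
 * cached values intact.
 */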
static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

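/*
 * Look up the cached string value and source for the given property,
 * falling back to the property's default (or "-") when it is not
 * present in the cached nvlist.
 */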
static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

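/*
 * Numeric counterpart of zpool_get_prop_string(). If the property cache
 * cannot be populated, fall back to the top-level vdev guid from the
 * config nvlist for ZPOOL_PROP_GUID, or to the property's default value
 * otherwise.
 */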
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));

	default:
		break;
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf,
			    zpool_pool_state_to_name(POOL_STATE_UNAVAIL), len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case ZPOOL_PROP_BOOTSIZE:
		case ZPOOL_PROP_EXPANDSZ:
		case ZPOOL_PROP_CHECKPOINT:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;
		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
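
/*
 * Example (hypothetical caller): print the pool's capacity in its
 * human-readable (non-literal) form.
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, buf, sizeof (buf),
 *	    NULL, B_FALSE) == 0)
 *		(void) printf("capacity: %s\n", buf);
 */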

/*
 * Check that the given bootfs name is a valid dataset name and that it
 * resides in the pool the property is being set on (either the pool's
 * root dataset or a descendant of it).
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}

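/*
 * Returns B_TRUE if the pool has a bootfs property set, i.e. the pool
 * is bootable.
 */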
boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZFS_MAX_DATASET_NAME_LEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPOOL_PROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTSIZE:
			if (!flags.create) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value must be a dataset name,
			 * and the dataset must reside in the pool it is being
			 * set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_TNAME:
			if (!flags.create) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		default:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property '%s'(%d) not defined"), propname, prop);
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

/*
 * Set zpool property: propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
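
/*
 * Example (hypothetical caller): the property name/value pair is passed
 * as strings, exactly as "zpool set" would supply them.
 *
 *	if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */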

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (int i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
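
/*
 * Example (hypothetical caller): query the state of the async_destroy
 * feature; 'buf' receives "disabled", "enabled", or "active".
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy", buf,
 *	    sizeof (buf)) == 0)
 *		(void) printf("async_destroy: %s\n", buf);
 */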

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define	NEW_START_BLOCK	256

/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE).
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 * from the pool.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
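
/*
 * Example (hypothetical caller): 'nvroot' describes the vdev tree for
 * the new pool and would normally be built by make_root_vdev() in
 * zpool(1M); only the calling pattern is sketched here.
 *
 *	if (zpool_create(hdl, "tank", nvroot, NULL, NULL) != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */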

/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Create a checkpoint in the given pool.
 */
int
zpool_checkpoint(zpool_handle_t *zhp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	int error;

	error = lzc_pool_checkpoint(zhp->zpool_name);
	if (error != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot checkpoint '%s'"), zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, msg);
		return (-1);
	}

	return (0);
}

/*
 * Discard the checkpoint from the given pool.
 */
int
zpool_discard_checkpoint(zpool_handle_t *zhp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	int error;

	error = lzc_pool_checkpoint_discard(zhp->zpool_name);
	if (error != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot discard checkpoint in '%s'"), zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, msg);
		return (-1);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EINVAL:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid config; a pool with removing/removed "
			    "vdevs does not support adding raidz vdevs"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

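/*
 * Public wrappers around zpool_export_common(): zpool_export() passes
 * the caller's 'force' flag through, while zpool_export_force() both
 * forces the export and sets 'hardforce'.
 */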
int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}

static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded", loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "), (loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "), loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	nvlist_free(props);
	return (ret);
}

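/*
 * Recursively print the vdev tree rooted at 'nv', indenting each level
 * by two additional spaces; log vdevs are tagged with " [log]".
 */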
static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

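/*
 * Print the pool's unsupported features (from the config's
 * ZPOOL_CONFIG_UNSUP_FEAT nvlist), one per line, with descriptions
 * where available.
 */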
void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = { 0 };
	zpool_load_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props != NULL) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
			return (-1);
		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
		nvlist_free(props);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_load_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing or "
				    "corrupted, use '-m' to import the pool "
				    "anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;
		case ENAMETOOLONG:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "new name of at least one dataset is longer than "
			    "the maximum allowable length"));
			(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
			break;
		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zlp_rewind &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	return (ret);
}

/*
 * Scan the pool.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	int err;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;
	zc.zc_flags = cmd;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
		return (0);

	err = errno;

	/* ECANCELED on a scrub means we resumed a paused scrub */
	if (err == ECANCELED && func == POOL_SCAN_SCRUB &&
	    cmd == POOL_SCRUB_NORMAL)
		return (0);

	if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL)
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		if (cmd == POOL_SCRUB_PAUSE) {
			(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
			    "cannot pause scrubbing %s"), zc.zc_name);
		} else {
			assert(cmd == POOL_SCRUB_NORMAL);
			(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
			    "cannot scrub %s"), zc.zc_name);
		}
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (err == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB) {
			if (cmd == POOL_SCRUB_PAUSE)
				return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg));
			else
				return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		} else {
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
		}
	} else if (err == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, err, msg));
	}
}

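/*
 * Translate errno values returned for a single vdev by the initialize
 * ioctl into the corresponding EZFS_* error codes.
 */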
static int
xlate_init_err(int err)
{
	switch (err) {
	case ENODEV:
		return (EZFS_NODEVICE);
	case EINVAL:
	case EROFS:
		return (EZFS_BADDEV);
	case EBUSY:
		return (EZFS_INITIALIZING);
	case ESRCH:
		return (EZFS_NO_INITIALIZE);
	}
	return (err);
}
2000
2001 /*
2002 * Begin, suspend, or cancel the initialization (initializing of all free
2003 * blocks) for the given vdevs in the given pool.
2004 */
2005 int
2006 zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2007 nvlist_t *vds)
2008 {
2009 char msg[1024];
2010 libzfs_handle_t *hdl = zhp->zpool_hdl;
2011
2012 nvlist_t *errlist;
2013
2014 /* translate vdev names to guids */
2015 nvlist_t *vdev_guids = fnvlist_alloc();
2016 nvlist_t *guids_to_paths = fnvlist_alloc();
2017 boolean_t spare, cache;
2018 nvlist_t *tgt;
2019 nvpair_t *elem;
2020
2021 for (elem = nvlist_next_nvpair(vds, NULL); elem != NULL;
2022 elem = nvlist_next_nvpair(vds, elem)) {
2023 char *vd_path = nvpair_name(elem);
2024 tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache, NULL);
2025
2026 if ((tgt == NULL) || cache || spare) {
2027 (void) snprintf(msg, sizeof (msg),
2028 dgettext(TEXT_DOMAIN, "cannot initialize '%s'"),
2029 vd_path);
2030 int err = (tgt == NULL) ? EZFS_NODEVICE :
2031 (spare ? EZFS_ISSPARE : EZFS_ISL2CACHE);
2032 fnvlist_free(vdev_guids);
2033 fnvlist_free(guids_to_paths);
2034 return (zfs_error(hdl, err, msg));
2035 }
2036
2037 uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
2038 fnvlist_add_uint64(vdev_guids, vd_path, guid);
2039
2040 (void) snprintf(msg, sizeof (msg), "%llu", guid);
2041 fnvlist_add_string(guids_to_paths, msg, vd_path);
2042 }
2043
2044 int err = lzc_initialize(zhp->zpool_name, cmd_type, vdev_guids,
2045 &errlist);
2046 fnvlist_free(vdev_guids);
2047
2048 if (err == 0) {
2049 fnvlist_free(guids_to_paths);
2050 return (0);
2051 }
2052
2053 nvlist_t *vd_errlist = NULL;
2054 if (errlist != NULL) {
2055 vd_errlist = fnvlist_lookup_nvlist(errlist,
2056 ZPOOL_INITIALIZE_VDEVS);
2057 }
2058
2059 (void) snprintf(msg, sizeof (msg),
2060 dgettext(TEXT_DOMAIN, "operation failed"));
2061
2062 for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;
2063 elem = nvlist_next_nvpair(vd_errlist, elem)) {
2064 int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));
2065 char *path = fnvlist_lookup_string(guids_to_paths,
2066 nvpair_name(elem));
2067 (void) zfs_error_fmt(hdl, vd_error, "cannot initialize '%s'",
2068 path);
2069 }
2070
2071 fnvlist_free(guids_to_paths);
2072 if (vd_errlist != NULL)
2073 return (-1);
2074
2075 return (zpool_standard_error(hdl, err, msg));
2076 }
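
/*
 * Illustrative usage sketch (an assumption, mirroring how the zpool
 * command builds its argument list): 'vds' carries one boolean nvpair
 * per vdev path, and only the names are consumed above. The device path
 * and the POOL_INITIALIZE_DO command value are placeholders assumed
 * from this vintage of sys/fs/zfs.h.
 *
 *	nvlist_t *vds = fnvlist_alloc();
 *	fnvlist_add_boolean(vds, "/dev/da1");
 *	(void) zpool_initialize(zhp, POOL_INITIALIZE_DO, vds);
 *	fnvlist_free(vds);
 */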
2077
2078 #ifdef illumos
2079 /*
2080 * This provides a very minimal check whether a given string is likely a
2081 * c#t#d# style string. Users of this are expected to do their own
2082 * verification of the s# part.
2083 */
2084 #define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1]))
2085
2086 /*
2087 * More elaborate version for ones which may start with "/dev/dsk/"
2088 * and the like.
2089 */
2090 static int
2091 ctd_check_path(char *str)
2092 {
2093 /*
2094 * If it starts with a slash, check the last component.
2095 */
2096 if (str && str[0] == '/') {
2097 char *tmp = strrchr(str, '/');
2098
2099 /*
2100 * If it ends in "/old", check the second-to-last
2101 * component of the string instead.
2102 */
2103 if (tmp != str && strcmp(tmp, "/old") == 0) {
2104 for (tmp--; *tmp != '/'; tmp--)
2105 ;
2106 }
2107 str = tmp + 1;
2108 }
2109 return (CTD_CHECK(str));
2110 }
2111 #endif
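
/*
 * For illustration (not in the original source): CTD_CHECK() accepts
 * strings such as "c0t0d0s0", and ctd_check_path() additionally accepts
 * forms like "/dev/dsk/c0t0d0s0" and "/dev/dsk/c0t0d0s0/old", since it
 * only inspects the relevant path component.
 */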
2112
2113 /*
2114 * Find a vdev that matches the search criteria specified. We use the
2115 * nvpair name to determine how we should look for the device.
2116 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
2117 * spare, but FALSE if it is an INUSE spare.
2118 */
2119 static nvlist_t *
2120 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
2121 boolean_t *l2cache, boolean_t *log)
2122 {
2123 uint_t c, children;
2124 nvlist_t **child;
2125 nvlist_t *ret;
2126 uint64_t is_log;
2127 char *srchkey;
2128 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
2129
2130 /* Nothing to look for */
2131 if (search == NULL || pair == NULL)
2132 return (NULL);
2133
2134 /* Obtain the key we will use to search */
2135 srchkey = nvpair_name(pair);
2136
2137 switch (nvpair_type(pair)) {
2138 case DATA_TYPE_UINT64:
2139 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
2140 uint64_t srchval, theguid;
2141
2142 verify(nvpair_value_uint64(pair, &srchval) == 0);
2143 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2144 &theguid) == 0);
2145 if (theguid == srchval)
2146 return (nv);
2147 }
2148 break;
2149
2150 case DATA_TYPE_STRING: {
2151 char *srchval, *val;
2152
2153 verify(nvpair_value_string(pair, &srchval) == 0);
2154 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
2155 break;
2156
2157 /*
2158 * Search for the requested value. Special cases:
2159 *
2160 * - ZPOOL_CONFIG_PATH for whole disk entries. To support
2161 * UEFI boot, these end in "s0" or "s0/old" or "s1" or
2162 * "s1/old". The "s0" or "s1" part is hidden from the user,
2163 * but included in the string, so this matches around it.
2164 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2165 *
2166 * Otherwise, all other searches are simple string compares.
2167 */
2168 #ifdef illumos
2169 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
2170 ctd_check_path(val)) {
2171 uint64_t wholedisk = 0;
2172
2173 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2174 &wholedisk);
2175 if (wholedisk) {
2176 int slen = strlen(srchval);
2177 int vlen = strlen(val);
2178
2179 if (slen != vlen - 2)
2180 break;
2181
2182 /*
2183 * make_leaf_vdev() should only set
2184 * wholedisk for ZPOOL_CONFIG_PATHs which
2185 * will include "/dev/dsk/", giving plenty of
2186 * room for the indices used next.
2187 */
2188 ASSERT(vlen >= 6);
2189
2190 /*
2191 * strings identical except trailing "s0"
2192 */
2193 if ((strcmp(&val[vlen - 2], "s0") == 0 ||
2194 strcmp(&val[vlen - 2], "s1") == 0) &&
2195 strncmp(srchval, val, slen) == 0)
2196 return (nv);
2197
2198 /*
2199 * strings identical except trailing "s0/old"
2200 */
2201 if ((strcmp(&val[vlen - 6], "s0/old") == 0 ||
2202 strcmp(&val[vlen - 6], "s1/old") == 0) &&
2203 strcmp(&srchval[slen - 4], "/old") == 0 &&
2204 strncmp(srchval, val, slen - 4) == 0)
2205 return (nv);
2206
2207 break;
2208 }
2209 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2210 #else
2211 if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2212 #endif
2213 char *type, *idx, *end, *p;
2214 uint64_t id, vdev_id;
2215
2216 /*
2217 * Determine our vdev type, keeping in mind
2218 * that the srchval is composed of a type and
2219 * vdev id pair (i.e. mirror-4).
2220 */
2221 if ((type = strdup(srchval)) == NULL)
2222 return (NULL);
2223
2224 if ((p = strrchr(type, '-')) == NULL) {
2225 free(type);
2226 break;
2227 }
2228 idx = p + 1;
2229 *p = '\0';
2230
2231 /*
2232 * If the types don't match then keep looking.
2233 */
2234 if (strncmp(val, type, strlen(val)) != 0) {
2235 free(type);
2236 break;
2237 }
2238
2239 verify(zpool_vdev_is_interior(type));
2240 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2241 &id) == 0);
2242
2243 errno = 0;
2244 vdev_id = strtoull(idx, &end, 10);
2245
2246 free(type);
2247 if (errno != 0)
2248 return (NULL);
2249
2250 /*
2251 * Now verify that we have the correct vdev id.
2252 */
2253 if (vdev_id == id)
2254 return (nv);
2255 }
2256
2257 /*
2258 * Common case
2259 */
2260 if (strcmp(srchval, val) == 0)
2261 return (nv);
2262 break;
2263 }
2264
2265 default:
2266 break;
2267 }
2268
2269 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2270 &child, &children) != 0)
2271 return (NULL);
2272
2273 for (c = 0; c < children; c++) {
2274 if ((ret = vdev_to_nvlist_iter(child[c], search,
2275 avail_spare, l2cache, NULL)) != NULL) {
2276 /*
2277 * The 'is_log' value is only set for the toplevel
2278 * vdev, not the leaf vdevs. So we always lookup the
2279 * log device from the root of the vdev tree (where
2280 * 'log' is non-NULL).
2281 */
2282 if (log != NULL &&
2283 nvlist_lookup_uint64(child[c],
2284 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2285 is_log) {
2286 *log = B_TRUE;
2287 }
2288 return (ret);
2289 }
2290 }
2291
2292 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2293 &child, &children) == 0) {
2294 for (c = 0; c < children; c++) {
2295 if ((ret = vdev_to_nvlist_iter(child[c], search,
2296 avail_spare, l2cache, NULL)) != NULL) {
2297 *avail_spare = B_TRUE;
2298 return (ret);
2299 }
2300 }
2301 }
2302
2303 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2304 &child, &children) == 0) {
2305 for (c = 0; c < children; c++) {
2306 if ((ret = vdev_to_nvlist_iter(child[c], search,
2307 avail_spare, l2cache, NULL)) != NULL) {
2308 *l2cache = B_TRUE;
2309 return (ret);
2310 }
2311 }
2312 }
2313
2314 return (NULL);
2315 }
2316
2317 /*
2318 * Given a physical path (minus the "/devices" prefix), find the
2319 * associated vdev.
2320 */
2321 nvlist_t *
2322 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2323 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2324 {
2325 nvlist_t *search, *nvroot, *ret;
2326
2327 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2328 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2329
2330 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2331 &nvroot) == 0);
2332
2333 *avail_spare = B_FALSE;
2334 *l2cache = B_FALSE;
2335 if (log != NULL)
2336 *log = B_FALSE;
2337 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2338 nvlist_free(search);
2339
2340 return (ret);
2341 }
2342
2343 /*
2344 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2345 */
2346 static boolean_t
2347 zpool_vdev_is_interior(const char *name)
2348 {
2349 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2350 strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
2351 strncmp(name,
2352 VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
2353 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2354 return (B_TRUE);
2355 return (B_FALSE);
2356 }
2357
2358 nvlist_t *
2359 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2360 boolean_t *l2cache, boolean_t *log)
2361 {
2362 char buf[MAXPATHLEN];
2363 char *end;
2364 nvlist_t *nvroot, *search, *ret;
2365 uint64_t guid;
2366
2367 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2368
2369 guid = strtoull(path, &end, 10);
2370 if (guid != 0 && *end == '\0') {
2371 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2372 } else if (zpool_vdev_is_interior(path)) {
2373 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2374 } else if (path[0] != '/') {
2375 (void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path);
2376 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
2377 } else {
2378 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2379 }
2380
2381 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2382 &nvroot) == 0);
2383
2384 *avail_spare = B_FALSE;
2385 *l2cache = B_FALSE;
2386 if (log != NULL)
2387 *log = B_FALSE;
2388 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2389 nvlist_free(search);
2390
2391 return (ret);
2392 }
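
/*
 * Illustrative usage sketch (not part of the original source): per the
 * dispatch above, 'path' may be a vdev guid in decimal, an interior
 * vdev name such as "mirror-0", a bare device name that gets the
 * _PATH_DEV prefix, or an absolute path. The names here are
 * placeholders.
 *
 *	boolean_t spare, l2cache, islog;
 *	uint64_t guid;
 *	nvlist_t *tgt;
 *
 *	if ((tgt = zpool_find_vdev(zhp, "mirror-0", &spare, &l2cache,
 *	    &islog)) != NULL)
 *		guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
 */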
2393
2394 static int
2395 vdev_online(nvlist_t *nv)
2396 {
2397 uint64_t ival;
2398
2399 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2400 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2401 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2402 return (0);
2403
2404 return (1);
2405 }
2406
2407 /*
2408 * Helper function for zpool_get_physpath().
2409 */
2410 static int
2411 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2412 size_t *bytes_written)
2413 {
2414 size_t bytes_left, pos, rsz;
2415 char *tmppath;
2416 const char *format;
2417
2418 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2419 &tmppath) != 0)
2420 return (EZFS_NODEVICE);
2421
2422 pos = *bytes_written;
2423 bytes_left = physpath_size - pos;
2424 format = (pos == 0) ? "%s" : " %s";
2425
2426 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2427 *bytes_written += rsz;
2428
2429 if (rsz >= bytes_left) {
2430 /* if physpath was not copied properly, clear it */
2431 if (bytes_left != 0) {
2432 physpath[pos] = 0;
2433 }
2434 return (EZFS_NOSPC);
2435 }
2436 return (0);
2437 }
2438
2439 static int
2440 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2441 size_t *rsz, boolean_t is_spare)
2442 {
2443 char *type;
2444 int ret;
2445
2446 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2447 return (EZFS_INVALCONFIG);
2448
2449 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2450 /*
2451 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2452 * For a spare vdev, we only want to boot from the active
2453 * spare device.
2454 */
2455 if (is_spare) {
2456 uint64_t spare = 0;
2457 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2458 &spare);
2459 if (!spare)
2460 return (EZFS_INVALCONFIG);
2461 }
2462
2463 if (vdev_online(nv)) {
2464 if ((ret = vdev_get_one_physpath(nv, physpath,
2465 phypath_size, rsz)) != 0)
2466 return (ret);
2467 }
2468 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2469 strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
2470 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2471 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2472 nvlist_t **child;
2473 uint_t count;
2474 int i, ret;
2475
2476 if (nvlist_lookup_nvlist_array(nv,
2477 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2478 return (EZFS_INVALCONFIG);
2479
2480 for (i = 0; i < count; i++) {
2481 ret = vdev_get_physpaths(child[i], physpath,
2482 phypath_size, rsz, is_spare);
2483 if (ret == EZFS_NOSPC)
2484 return (ret);
2485 }
2486 }
2487
2488 return (EZFS_POOL_INVALARG);
2489 }
2490
2491 /*
2492 * Get phys_path for a root pool config.
2493 * Return 0 on success; non-zero on failure.
2494 */
2495 static int
2496 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2497 {
2498 size_t rsz;
2499 nvlist_t *vdev_root;
2500 nvlist_t **child;
2501 uint_t count;
2502 char *type;
2503
2504 rsz = 0;
2505
2506 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2507 &vdev_root) != 0)
2508 return (EZFS_INVALCONFIG);
2509
2510 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2511 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2512 &child, &count) != 0)
2513 return (EZFS_INVALCONFIG);
2514
2515 /*
2516 * root pool can only have a single top-level vdev.
2517 */
2518 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
2519 return (EZFS_POOL_INVALARG);
2520
2521 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2522 B_FALSE);
2523
2524 /* No online devices */
2525 if (rsz == 0)
2526 return (EZFS_NODEVICE);
2527
2528 return (0);
2529 }
2530
2531 /*
2532 * Get phys_path for a root pool
2533 * Return 0 on success; non-zero on failure.
2534 */
2535 int
2536 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2537 {
2538 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2539 phypath_size));
2540 }
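
/*
 * Illustrative usage sketch (not part of the original source): callers
 * typically supply a MAXPATHLEN buffer; on success it holds the
 * space-separated physical paths of the online devices in the pool's
 * single top-level vdev.
 *
 *	char physpath[MAXPATHLEN];
 *	if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
 *		(void) printf("boot paths: %s\n", physpath);
 */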
2541
2542 /*
2543 * If the device has been dynamically expanded then we need to relabel
2544 * the disk to use the new unallocated space.
2545 */
2546 static int
2547 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
2548 {
2549 #ifdef illumos
2550 char path[MAXPATHLEN];
2551 char errbuf[1024];
2552 int fd, error;
2553 int (*_efi_use_whole_disk)(int);
2554
2555 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
2556 "efi_use_whole_disk")) == NULL)
2557 return (-1);
2558
2559 (void) snprintf(path, sizeof (path), "%s/%s", ZFS_RDISK_ROOT, name);
2560
2561 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2562 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2563 "relabel '%s': unable to open device"), name);
2564 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2565 }
2566
2567 /*
2568 * It's possible that we might encounter an error if the device
2569 * does not have any unallocated space left. If so, we simply
2570 * ignore that error and continue on.
2571 */
2572 error = _efi_use_whole_disk(fd);
2573 (void) close(fd);
2574 if (error && error != VT_ENOSPC) {
2575 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2576 "relabel '%s': unable to read disk capacity"), name);
2577 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2578 }
2579 #endif /* illumos */
2580 return (0);
2581 }
2582
2583 /*
2584 * Bring the specified vdev online. The 'flags' parameter is a set of the
2585 * ZFS_ONLINE_* flags.
2586 */
2587 int
2588 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2589 vdev_state_t *newstate)
2590 {
2591 zfs_cmd_t zc = { 0 };
2592 char msg[1024];
2593 char *pathname;
2594 nvlist_t *tgt;
2595 boolean_t avail_spare, l2cache, islog;
2596 libzfs_handle_t *hdl = zhp->zpool_hdl;
2597
2598 if (flags & ZFS_ONLINE_EXPAND) {
2599 (void) snprintf(msg, sizeof (msg),
2600 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2601 } else {
2602 (void) snprintf(msg, sizeof (msg),
2603 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2604 }
2605
2606 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2607 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2608 &islog)) == NULL)
2609 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2610
2611 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2612
2613 if (avail_spare)
2614 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2615
2616 if ((flags & ZFS_ONLINE_EXPAND ||
2617 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
2618 nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
2619 uint64_t wholedisk = 0;
2620
2621 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2622 &wholedisk);
2623
2624 /*
2625 * XXX - L2ARC 1.0 devices can't support expansion.
2626 */
2627 if (l2cache) {
2628 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2629 "cannot expand cache devices"));
2630 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2631 }
2632
2633 if (wholedisk) {
2634 pathname += strlen(ZFS_DISK_ROOT) + 1;
2635 (void) zpool_relabel_disk(hdl, pathname);
2636 }
2637 }
2638
2639 zc.zc_cookie = VDEV_STATE_ONLINE;
2640 zc.zc_obj = flags;
2641
2642 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2643 if (errno == EINVAL) {
2644 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2645 "from this pool into a new one. Use '%s' "
2646 "instead"), "zpool detach");
2647 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2648 }
2649 return (zpool_standard_error(hdl, errno, msg));
2650 }
2651
2652 *newstate = zc.zc_cookie;
2653 return (0);
2654 }
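
/*
 * Illustrative usage sketch (not part of the original source); "da1" is
 * a placeholder device name:
 *
 *	vdev_state_t newstate;
 *	if (zpool_vdev_online(zhp, "da1", ZFS_ONLINE_EXPAND,
 *	    &newstate) == 0 && newstate == VDEV_STATE_HEALTHY)
 *		(void) printf("da1 expanded and healthy\n");
 */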
2655
2656 /*
2657 * Take the specified vdev offline
2658 */
2659 int
2660 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2661 {
2662 zfs_cmd_t zc = { 0 };
2663 char msg[1024];
2664 nvlist_t *tgt;
2665 boolean_t avail_spare, l2cache;
2666 libzfs_handle_t *hdl = zhp->zpool_hdl;
2667
2668 (void) snprintf(msg, sizeof (msg),
2669 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2670
2671 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2672 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2673 NULL)) == NULL)
2674 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2675
2676 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2677
2678 if (avail_spare)
2679 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2680
2681 zc.zc_cookie = VDEV_STATE_OFFLINE;
2682 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2683
2684 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2685 return (0);
2686
2687 switch (errno) {
2688 case EBUSY:
2689
2690 /*
2691 * There are no other replicas of this device.
2692 */
2693 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2694
2695 case EEXIST:
2696 /*
2697 * The log device has unplayed logs
2698 */
2699 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2700
2701 default:
2702 return (zpool_standard_error(hdl, errno, msg));
2703 }
2704 }
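
/*
 * Illustrative usage sketch (not part of the original source): a
 * temporary offline does not persist across reboot, which is what
 * 'zpool offline -t' relies on.
 *
 *	if (zpool_vdev_offline(zhp, "da1", B_TRUE) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */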
2705
2706 /*
2707 * Mark the given vdev faulted.
2708 */
2709 int
2710 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2711 {
2712 zfs_cmd_t zc = { 0 };
2713 char msg[1024];
2714 libzfs_handle_t *hdl = zhp->zpool_hdl;
2715
2716 (void) snprintf(msg, sizeof (msg),
2717 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
2718
2719 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2720 zc.zc_guid = guid;
2721 zc.zc_cookie = VDEV_STATE_FAULTED;
2722 zc.zc_obj = aux;
2723
2724 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2725 return (0);
2726
2727 switch (errno) {
2728 case EBUSY:
2729
2730 /*
2731 * There are no other replicas of this device.
2732 */
2733 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2734
2735 default:
2736 return (zpool_standard_error(hdl, errno, msg));
2737 }
2738
2739 }
2740
2741 /*
2742 * Mark the given vdev degraded.
2743 */
2744 int
2745 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2746 {
2747 zfs_cmd_t zc = { 0 };
2748 char msg[1024];
2749 libzfs_handle_t *hdl = zhp->zpool_hdl;
2750
2751 (void) snprintf(msg, sizeof (msg),
2752 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
2753
2754 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2755 zc.zc_guid = guid;
2756 zc.zc_cookie = VDEV_STATE_DEGRADED;
2757 zc.zc_obj = aux;
2758
2759 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2760 return (0);
2761
2762 return (zpool_standard_error(hdl, errno, msg));
2763 }
2764
2765 /*
2766 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2767 * a hot spare.
2768 */
2769 static boolean_t
2770 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2771 {
2772 nvlist_t **child;
2773 uint_t c, children;
2774 char *type;
2775
2776 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2777 &children) == 0) {
2778 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2779 &type) == 0);
2780
2781 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2782 children == 2 && child[which] == tgt)
2783 return (B_TRUE);
2784
2785 for (c = 0; c < children; c++)
2786 if (is_replacing_spare(child[c], tgt, which))
2787 return (B_TRUE);
2788 }
2789
2790 return (B_FALSE);
2791 }
2792
2793 /*
2794 * Attach new_disk (fully described by nvroot) to old_disk.
2795 * If 'replacing' is specified, the new disk will replace the old one.
2796 */
2797 int
2798 zpool_vdev_attach(zpool_handle_t *zhp,
2799 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2800 {
2801 zfs_cmd_t zc = { 0 };
2802 char msg[1024];
2803 int ret;
2804 nvlist_t *tgt;
2805 boolean_t avail_spare, l2cache, islog;
2806 uint64_t val;
2807 char *newname;
2808 nvlist_t **child;
2809 uint_t children;
2810 nvlist_t *config_root;
2811 libzfs_handle_t *hdl = zhp->zpool_hdl;
2812 boolean_t rootpool = zpool_is_bootable(zhp);
2813
2814 if (replacing)
2815 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2816 "cannot replace %s with %s"), old_disk, new_disk);
2817 else
2818 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2819 "cannot attach %s to %s"), new_disk, old_disk);
2820
2821 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2822 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2823 &islog)) == NULL)
2824 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2825
2826 if (avail_spare)
2827 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2828
2829 if (l2cache)
2830 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2831
2832 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2833 zc.zc_cookie = replacing;
2834
2835 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2836 &child, &children) != 0 || children != 1) {
2837 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2838 "new device must be a single disk"));
2839 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2840 }
2841
2842 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2843 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2844
2845 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2846 return (-1);
2847
2848 /*
2849 * If the target is a hot spare that has been swapped in, we can only
2850 * replace it with another hot spare.
2851 */
2852 if (replacing &&
2853 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2854 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2855 NULL) == NULL || !avail_spare) &&
2856 is_replacing_spare(config_root, tgt, 1)) {
2857 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2858 "can only be replaced by another hot spare"));
2859 free(newname);
2860 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2861 }
2862
2863 free(newname);
2864
2865 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2866 return (-1);
2867
2868 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2869
2870 zcmd_free_nvlists(&zc);
2871
2872 if (ret == 0) {
2873 if (rootpool) {
2874 /*
2875 * XXX need a better way to prevent user from
2876 * booting up a half-baked vdev.
2877 */
2878 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2879 "sure to wait until resilver is done "
2880 "before rebooting.\n"));
2881 (void) fprintf(stderr, "\n");
2882 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "If "
2883 "you boot from pool '%s', you may need to update\n"
2884 "boot code on newly attached disk '%s'.\n\n"
2885 "Assuming you use GPT partitioning and 'da0' is "
2886 "your new boot disk\n"
2887 "you may use the following command:\n\n"
2888 "\tgpart bootcode -b /boot/pmbr -p "
2889 "/boot/gptzfsboot -i 1 da0\n\n"),
2890 zhp->zpool_name, new_disk);
2891 }
2892 return (0);
2893 }
2894
2895 switch (errno) {
2896 case ENOTSUP:
2897 /*
2898 * Can't attach to or replace this type of vdev.
2899 */
2900 if (replacing) {
2901 uint64_t version = zpool_get_prop_int(zhp,
2902 ZPOOL_PROP_VERSION, NULL);
2903
2904 if (islog)
2905 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2906 "cannot replace a log with a spare"));
2907 else if (version >= SPA_VERSION_MULTI_REPLACE)
2908 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2909 "already in replacing/spare config; wait "
2910 "for completion or use 'zpool detach'"));
2911 else
2912 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2913 "cannot replace a replacing device"));
2914 } else {
2915 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2916 "can only attach to mirrors and top-level "
2917 "disks"));
2918 }
2919 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2920 break;
2921
2922 case EINVAL:
2923 /*
2924 * The new device must be a single disk.
2925 */
2926 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2927 "new device must be a single disk"));
2928 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2929 break;
2930
2931 case EBUSY:
2932 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, "
2933 "or device removal is in progress"),
2934 new_disk);
2935 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2936 break;
2937
2938 case EOVERFLOW:
2939 /*
2940 * The new device is too small.
2941 */
2942 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2943 "device is too small"));
2944 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2945 break;
2946
2947 case EDOM:
2948 /*
2949 * The new device has a different alignment requirement.
2950 */
2951 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2952 "devices have different sector alignment"));
2953 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2954 break;
2955
2956 case ENAMETOOLONG:
2957 /*
2958 * The resulting top-level vdev spec won't fit in the label.
2959 */
2960 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2961 break;
2962
2963 default:
2964 (void) zpool_standard_error(hdl, errno, msg);
2965 }
2966
2967 return (-1);
2968 }
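
/*
 * Illustrative sketch of the expected 'nvroot' shape (an assumption;
 * the zpool command normally builds this with its make_root_vdev()
 * helper rather than by hand): a root vdev wrapping exactly one disk
 * child, here with placeholder path "/dev/da2".
 *
 *	nvlist_t *disk = fnvlist_alloc();
 *	fnvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *	fnvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/da2");
 *	nvlist_t *nvroot = fnvlist_alloc();
 *	fnvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *	fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &disk, 1);
 *	(void) zpool_vdev_attach(zhp, "da0", "/dev/da2", nvroot, 1);
 */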
2969
2970 /*
2971 * Detach the specified device.
2972 */
2973 int
2974 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2975 {
2976 zfs_cmd_t zc = { 0 };
2977 char msg[1024];
2978 nvlist_t *tgt;
2979 boolean_t avail_spare, l2cache;
2980 libzfs_handle_t *hdl = zhp->zpool_hdl;
2981
2982 (void) snprintf(msg, sizeof (msg),
2983 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2984
2985 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2986 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2987 NULL)) == NULL)
2988 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2989
2990 if (avail_spare)
2991 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2992
2993 if (l2cache)
2994 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2995
2996 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2997
2998 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2999 return (0);
3000
3001 switch (errno) {
3002
3003 case ENOTSUP:
3004 /*
3005 * Can't detach from this type of vdev.
3006 */
3007 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
3008 "applicable to mirror and replacing vdevs"));
3009 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
3010 break;
3011
3012 case EBUSY:
3013 /*
3014 * There are no other replicas of this device.
3015 */
3016 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
3017 break;
3018
3019 default:
3020 (void) zpool_standard_error(hdl, errno, msg);
3021 }
3022
3023 return (-1);
3024 }
3025
3026 /*
3027 * Find a mirror vdev in the source nvlist.
3028 *
3029 * The mchild array contains a list of disks in one of the top-level mirrors
3030 * of the source pool. The schild array contains a list of disks that the
3031 * user specified on the command line. We loop over the mchild array to
3032 * see if any entry in the schild array matches.
3033 *
3034 * If a disk in the mchild array is found in the schild array, we return
3035 * the index of that entry. Otherwise we return -1.
3036 */
3037 static int
3038 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
3039 nvlist_t **schild, uint_t schildren)
3040 {
3041 uint_t mc;
3042
3043 for (mc = 0; mc < mchildren; mc++) {
3044 uint_t sc;
3045 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
3046 mchild[mc], B_FALSE);
3047
3048 for (sc = 0; sc < schildren; sc++) {
3049 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
3050 schild[sc], B_FALSE);
3051 boolean_t result = (strcmp(mpath, spath) == 0);
3052
3053 free(spath);
3054 if (result) {
3055 free(mpath);
3056 return (mc);
3057 }
3058 }
3059
3060 free(mpath);
3061 }
3062
3063 return (-1);
3064 }
3065
3066 /*
3067 * Split a mirror pool. If '*newroot' is NULL, then a new nvlist
3068 * is generated and it is the responsibility of the caller to free it.
3069 */
3070 int
3071 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
3072 nvlist_t *props, splitflags_t flags)
3073 {
3074 zfs_cmd_t zc = { 0 };
3075 char msg[1024];
3076 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
3077 nvlist_t **varray = NULL, *zc_props = NULL;
3078 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
3079 libzfs_handle_t *hdl = zhp->zpool_hdl;
3080 uint64_t vers;
3081 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
3082 int retval = 0;
3083
3084 (void) snprintf(msg, sizeof (msg),
3085 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
3086
3087 if (!zpool_name_valid(hdl, B_FALSE, newname))
3088 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
3089
3090 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
3091 (void) fprintf(stderr, gettext("Internal error: unable to "
3092 "retrieve pool configuration\n"));
3093 return (-1);
3094 }
3095
3096 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
3097 == 0);
3098 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
3099
3100 if (props) {
3101 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
3102 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
3103 props, vers, flags, msg)) == NULL)
3104 return (-1);
3105 }
3106
3107 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
3108 &children) != 0) {
3109 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3110 "Source pool is missing vdev tree"));
3111 nvlist_free(zc_props);
3112 return (-1);
3113 }
3114
3115 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
3116 vcount = 0;
3117
3118 if (*newroot == NULL ||
3119 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
3120 &newchild, &newchildren) != 0)
3121 newchildren = 0;
3122
3123 for (c = 0; c < children; c++) {
3124 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
3125 char *type;
3126 nvlist_t **mchild, *vdev;
3127 uint_t mchildren;
3128 int entry;
3129
3130 /*
3131 * Unlike cache & spares, slogs are stored in the
3132 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
3133 */
3134 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3135 &is_log);
3136 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
3137 &is_hole);
3138 if (is_log || is_hole) {
3139 /*
3140 * Create a hole vdev and put it in the config.
3141 */
3142 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
3143 goto out;
3144 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
3145 VDEV_TYPE_HOLE) != 0)
3146 goto out;
3147 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
3148 1) != 0)
3149 goto out;
3150 if (lastlog == 0)
3151 lastlog = vcount;
3152 varray[vcount++] = vdev;
3153 continue;
3154 }
3155 lastlog = 0;
3156 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
3157 == 0);
3158 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
3159 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3160 "Source pool must be composed only of mirrors\n"));
3161 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3162 goto out;
3163 }
3164
3165 verify(nvlist_lookup_nvlist_array(child[c],
3166 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
3167
3168 /* find or add an entry for this top-level vdev */
3169 if (newchildren > 0 &&
3170 (entry = find_vdev_entry(zhp, mchild, mchildren,
3171 newchild, newchildren)) >= 0) {
3172 /* We found a disk that the user specified. */
3173 vdev = mchild[entry];
3174 ++found;
3175 } else {
3176 /* User didn't specify a disk for this vdev. */
3177 vdev = mchild[mchildren - 1];
3178 }
3179
3180 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3181 goto out;
3182 }
3183
3184 /* did we find every disk the user specified? */
3185 if (found != newchildren) {
3186 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
3187 "include at most one disk from each mirror"));
3188 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3189 goto out;
3190 }
3191
3192 /* Prepare the nvlist for populating. */
3193 if (*newroot == NULL) {
3194 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
3195 goto out;
3196 freelist = B_TRUE;
3197 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
3198 VDEV_TYPE_ROOT) != 0)
3199 goto out;
3200 } else {
3201 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3202 }
3203
3204 /* Add all the children we found */
3205 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3206 lastlog == 0 ? vcount : lastlog) != 0)
3207 goto out;
3208
3209 /*
3210 * If we're just doing a dry run, exit now with success.
3211 */
3212 if (flags.dryrun) {
3213 memory_err = B_FALSE;
3214 freelist = B_FALSE;
3215 goto out;
3216 }
3217
3218 /* now build up the config list & call the ioctl */
3219 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3220 goto out;
3221
3222 if (nvlist_add_nvlist(newconfig,
3223 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3224 nvlist_add_string(newconfig,
3225 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3226 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3227 goto out;
3228
3229 /*
3230 * The new pool is automatically part of the namespace unless we
3231 * explicitly export it.
3232 */
3233 if (!flags.import)
3234 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3235 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3236 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3237 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3238 goto out;
3239 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3240 goto out;
3241
3242 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3243 retval = zpool_standard_error(hdl, errno, msg);
3244 goto out;
3245 }
3246
3247 freelist = B_FALSE;
3248 memory_err = B_FALSE;
3249
3250 out:
3251 if (varray != NULL) {
3252 int v;
3253
3254 for (v = 0; v < vcount; v++)
3255 nvlist_free(varray[v]);
3256 free(varray);
3257 }
3258 zcmd_free_nvlists(&zc);
3259 nvlist_free(zc_props);
3260 nvlist_free(newconfig);
3261 if (freelist) {
3262 nvlist_free(*newroot);
3263 *newroot = NULL;
3264 }
3265
3266 if (retval != 0)
3267 return (retval);
3268
3269 if (memory_err)
3270 return (no_memory(hdl));
3271
3272 return (0);
3273 }
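
/*
 * Illustrative usage sketch (not part of the original source): a dry
 * run that lets the caller inspect the generated root vdev tree before
 * committing to the split; "newtank" is a placeholder name.
 *
 *	nvlist_t *newroot = NULL;
 *	splitflags_t flags = { 0 };
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "newtank", &newroot, NULL, flags) == 0)
 *		nvlist_free(newroot);
 */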
3274
3275 /*
3276 * Remove the given device.
3277 */
3278 int
3279 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3280 {
3281 zfs_cmd_t zc = { 0 };
3282 char msg[1024];
3283 nvlist_t *tgt;
3284 boolean_t avail_spare, l2cache, islog;
3285 libzfs_handle_t *hdl = zhp->zpool_hdl;
3286 uint64_t version;
3287
3288 (void) snprintf(msg, sizeof (msg),
3289 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3290
3291 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3292 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3293 &islog)) == NULL)
3294 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3295
3296 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3297 if (islog && version < SPA_VERSION_HOLES) {
3298 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3299 "pool must be upgraded to support log removal"));
3300 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3301 }
3302
3303 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3304
3305 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3306 return (0);
3307
3308 switch (errno) {
3309
3310 case EINVAL:
3311 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3312 "invalid config; all top-level vdevs must "
3313 "have the same sector size and not be raidz."));
3314 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
3315 break;
3316
3317 case EBUSY:
3318 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3319 "Pool busy; removal may already be in progress"));
3320 (void) zfs_error(hdl, EZFS_BUSY, msg);
3321 break;
3322
3323 default:
3324 (void) zpool_standard_error(hdl, errno, msg);
3325 }
3326 return (-1);
3327 }
3328
3329 int
3330 zpool_vdev_remove_cancel(zpool_handle_t *zhp)
3331 {
3332 zfs_cmd_t zc = { 0 };
3333 char msg[1024];
3334 libzfs_handle_t *hdl = zhp->zpool_hdl;
3335
3336 (void) snprintf(msg, sizeof (msg),
3337 dgettext(TEXT_DOMAIN, "cannot cancel removal"));
3338
3339 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3340 zc.zc_cookie = 1;
3341
3342 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3343 return (0);
3344
3345 return (zpool_standard_error(hdl, errno, msg));
3346 }
3347
3348 int
3349 zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
3350 uint64_t *sizep)
3351 {
3352 char msg[1024];
3353 nvlist_t *tgt;
3354 boolean_t avail_spare, l2cache, islog;
3355 libzfs_handle_t *hdl = zhp->zpool_hdl;
3356
3357 (void) snprintf(msg, sizeof (msg),
3358 dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
3359 path);
3360
3361 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3362 &islog)) == NULL)
3363 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3364
3365 if (avail_spare || l2cache || islog) {
3366 *sizep = 0;
3367 return (0);
3368 }
3369
3370 if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
3371 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3372 "indirect size not available"));
3373 return (zfs_error(hdl, EINVAL, msg));
3374 }
3375 return (0);
3376 }
3377
3378 /*
3379 * Clear the errors for the pool, or the particular device if specified.
3380 */
3381 int
3382 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3383 {
3384 zfs_cmd_t zc = { 0 };
3385 char msg[1024];
3386 nvlist_t *tgt;
3387 zpool_load_policy_t policy;
3388 boolean_t avail_spare, l2cache;
3389 libzfs_handle_t *hdl = zhp->zpool_hdl;
3390 nvlist_t *nvi = NULL;
3391 int error;
3392
3393 if (path)
3394 (void) snprintf(msg, sizeof (msg),
3395 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3396 path);
3397 else
3398 (void) snprintf(msg, sizeof (msg),
3399 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3400 zhp->zpool_name);
3401
3402 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3403 if (path) {
3404 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3405 &l2cache, NULL)) == NULL)
3406 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3407
3408 /*
3409 * Don't allow error clearing for hot spares. Do allow
3410 * error clearing for l2cache devices.
3411 */
3412 if (avail_spare)
3413 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3414
3415 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3416 &zc.zc_guid) == 0);
3417 }
3418
3419 zpool_get_load_policy(rewindnvl, &policy);
3420 zc.zc_cookie = policy.zlp_rewind;
3421
3422 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3423 return (-1);
3424
3425 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3426 return (-1);
3427
3428 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3429 errno == ENOMEM) {
3430 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3431 zcmd_free_nvlists(&zc);
3432 return (-1);
3433 }
3434 }
3435
3436 if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) &&
3437 errno != EPERM && errno != EACCES)) {
3438 if (policy.zlp_rewind &
3439 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3440 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3441 zpool_rewind_exclaim(hdl, zc.zc_name,
3442 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0),
3443 nvi);
3444 nvlist_free(nvi);
3445 }
3446 zcmd_free_nvlists(&zc);
3447 return (0);
3448 }
3449
3450 zcmd_free_nvlists(&zc);
3451 return (zpool_standard_error(hdl, errno, msg));
3452 }
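
/*
 * Illustrative usage sketch (an assumption, following how the zpool
 * command invokes this): 'rewindnvl' carries the load policy; a plain
 * error clear of the whole pool passes ZPOOL_NO_REWIND.
 *
 *	nvlist_t *policy = fnvlist_alloc();
 *	fnvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
 *	    ZPOOL_NO_REWIND);
 *	(void) zpool_clear(zhp, NULL, policy);
 *	nvlist_free(policy);
 */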
3453
3454 /*
3455 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3456 */
3457 int
3458 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3459 {
3460 zfs_cmd_t zc = { 0 };
3461 char msg[1024];
3462 libzfs_handle_t *hdl = zhp->zpool_hdl;
3463
3464 (void) snprintf(msg, sizeof (msg),
3465 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3466 guid);
3467
3468 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3469 zc.zc_guid = guid;
3470 zc.zc_cookie = ZPOOL_NO_REWIND;
3471
3472 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3473 return (0);
3474
3475 return (zpool_standard_error(hdl, errno, msg));
3476 }
3477
3478 /*
3479 * Change the GUID for a pool.
3480 */
3481 int
3482 zpool_reguid(zpool_handle_t *zhp)
3483 {
3484 char msg[1024];
3485 libzfs_handle_t *hdl = zhp->zpool_hdl;
3486 zfs_cmd_t zc = { 0 };
3487
3488 (void) snprintf(msg, sizeof (msg),
3489 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3490
3491 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3492 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3493 return (0);
3494
3495 return (zpool_standard_error(hdl, errno, msg));
3496 }
3497
3498 /*
3499 * Reopen the pool.
3500 */
3501 int
3502 zpool_reopen(zpool_handle_t *zhp)
3503 {
3504 zfs_cmd_t zc = { 0 };
3505 char msg[1024];
3506 libzfs_handle_t *hdl = zhp->zpool_hdl;
3507
3508 (void) snprintf(msg, sizeof (msg),
3509 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3510 zhp->zpool_name);
3511
3512 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3513 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3514 return (0);
3515 return (zpool_standard_error(hdl, errno, msg));
3516 }
3517
3518 /*
3519 * Convert from a devid string to a path.
3520 */
3521 static char *
3522 devid_to_path(char *devid_str)
3523 {
3524 ddi_devid_t devid;
3525 char *minor;
3526 char *path;
3527 devid_nmlist_t *list = NULL;
3528 int ret;
3529
3530 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3531 return (NULL);
3532
3533 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3534
3535 devid_str_free(minor);
3536 devid_free(devid);
3537
3538 if (ret != 0)
3539 return (NULL);
3540
3541 /*
3542 * In case the strdup() fails, we will just return NULL below.
3543 */
3544 path = strdup(list[0].devname);
3545
3546 devid_free_nmlist(list);
3547
3548 return (path);
3549 }
3550
3551 /*
3552 * Convert from a path to a devid string.
3553 */
3554 static char *
3555 path_to_devid(const char *path)
3556 {
3557 #ifdef have_devid
3558 int fd;
3559 ddi_devid_t devid;
3560 char *minor, *ret;
3561
3562 if ((fd = open(path, O_RDONLY)) < 0)
3563 return (NULL);
3564
3565 minor = NULL;
3566 ret = NULL;
3567 if (devid_get(fd, &devid) == 0) {
3568 if (devid_get_minor_name(fd, &minor) == 0)
3569 ret = devid_str_encode(devid, minor);
3570 if (minor != NULL)
3571 devid_str_free(minor);
3572 devid_free(devid);
3573 }
3574 (void) close(fd);
3575
3576 return (ret);
3577 #else
3578 return (NULL);
3579 #endif
3580 }
3581
3582 /*
3583 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3584 * ignore any failure here, since a common case is for an unprivileged user to
3585 * type 'zpool status', and we'll display the correct information anyway.
3586 */
3587 static void
3588 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3589 {
3590 zfs_cmd_t zc = { 0 };
3591
3592 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3593 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3594 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3595 &zc.zc_guid) == 0);
3596
3597 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3598 }
3599
3600 /*
3601 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3602 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3603 * We also check if this is a whole disk, in which case we strip off the
3604 * trailing 's0' slice name.
3605 *
3606 * This routine is also responsible for identifying when disks have been
3607 * reconfigured in a new location. The kernel will have opened the device by
3608 * devid, but the path will still refer to the old location. To catch this, we
3609 * first do a path -> devid translation (which is fast for the common case). If
3610 * the devid matches, we're done. If not, we do a reverse devid -> path
3611 * translation and issue the appropriate ioctl() to update the path of the vdev.
3612 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3613 * of these checks.
3614 */
3615 char *
3616 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3617 boolean_t verbose)
3618 {
3619 char *path, *devid;
3620 uint64_t value;
3621 char buf[64];
3622 vdev_stat_t *vs;
3623 uint_t vsc;
3624 int have_stats;
3625 int have_path;
3626
3627 have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3628 (uint64_t **)&vs, &vsc) == 0;
3629 have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0;
3630
3631 /*
3632 * If the device is not currently present, assume it will not
3633 * come back at the same device path. Display the device by GUID.
3634 */
3635 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
3636 (have_path && have_stats && vs->vs_state <= VDEV_STATE_CANT_OPEN)) {
3637 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3638 &value) == 0);
3639 (void) snprintf(buf, sizeof (buf), "%llu",
3640 (u_longlong_t)value);
3641 path = buf;
3642 } else if (have_path) {
3643
3644 /*
3645 * If the device is dead (faulted, offline, etc) then don't
3646 * bother opening it. Otherwise we may be forcing the user to
3647 * open a misbehaving device, which can have undesirable
3648 * effects.
3649 */
3650 if ((have_stats == 0 ||
3651 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3652 zhp != NULL &&
3653 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3654 /*
3655 * Determine if the current path is correct.
3656 */
3657 char *newdevid = path_to_devid(path);
3658
3659 if (newdevid == NULL ||
3660 strcmp(devid, newdevid) != 0) {
3661 char *newpath;
3662
3663 if ((newpath = devid_to_path(devid)) != NULL) {
3664 /*
3665 * Update the path appropriately.
3666 */
3667 set_path(zhp, nv, newpath);
3668 if (nvlist_add_string(nv,
3669 ZPOOL_CONFIG_PATH, newpath) == 0)
3670 verify(nvlist_lookup_string(nv,
3671 ZPOOL_CONFIG_PATH,
3672 &path) == 0);
3673 free(newpath);
3674 }
3675 }
3676
3677 if (newdevid)
3678 devid_str_free(newdevid);
3679 }
3680
3681 #ifdef illumos
3682 if (strncmp(path, ZFS_DISK_ROOTD, strlen(ZFS_DISK_ROOTD)) == 0)
3683 path += strlen(ZFS_DISK_ROOTD);
3684
3685 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3686 &value) == 0 && value) {
3687 int pathlen = strlen(path);
3688 char *tmp = zfs_strdup(hdl, path);
3689
3690 /*
3691 * If it starts with c#, and ends with "s0" or "s1",
3692 * chop the slice off, or if it ends with "s0/old" or
3693 * "s1/old", remove the slice from the middle.
3694 */
3695 if (CTD_CHECK(tmp)) {
3696 if (strcmp(&tmp[pathlen - 2], "s0") == 0 ||
3697 strcmp(&tmp[pathlen - 2], "s1") == 0) {
3698 tmp[pathlen - 2] = '\0';
3699 } else if (pathlen > 6 &&
3700 (strcmp(&tmp[pathlen - 6], "s0/old") == 0 ||
3701 strcmp(&tmp[pathlen - 6], "s1/old") == 0)) {
3702 (void) strcpy(&tmp[pathlen - 6],
3703 "/old");
3704 }
3705 }
3706 return (tmp);
3707 }
3708 #else /* !illumos */
3709 if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
3710 path += sizeof(_PATH_DEV) - 1;
3711 #endif /* illumos */
3712 } else {
3713 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3714
3715 /*
3716 * If it's a raidz device, we need to stick in the parity level.
3717 */
3718 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3719 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3720 &value) == 0);
3721 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3722 (u_longlong_t)value);
3723 path = buf;
3724 }
3725
3726 /*
3727 * We identify each top-level vdev by using a <type-id>
3728 * naming convention.
3729 */
3730 if (verbose) {
3731 uint64_t id;
3732
3733 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3734 &id) == 0);
3735 (void) snprintf(buf, sizeof (buf), "%s-%llu", path,
3736 (u_longlong_t)id);
3737 path = buf;
3738 }
3739 }
3740
3741 return (zfs_strdup(hdl, path));
3742 }
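
/*
 * Illustrative usage sketch (not part of the original source): the
 * returned string is allocated with zfs_strdup() and must be freed by
 * the caller; 'child' is a placeholder for a vdev nvlist taken from the
 * pool config.
 *
 *	char *name = zpool_vdev_name(hdl, zhp, child, B_TRUE);
 *	(void) printf("%s\n", name);
 *	free(name);
 */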
3743
3744 static int
3745 zbookmark_mem_compare(const void *a, const void *b)
3746 {
3747 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3748 }
3749
3750 /*
3751 * Retrieve the persistent error log, uniquify the members, and return to the
3752 * caller.
3753 */
3754 int
3755 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3756 {
3757 zfs_cmd_t zc = { 0 };
3758 uint64_t count;
3759 zbookmark_phys_t *zb = NULL;
3760 int i;
3761
3762 /*
3763 * Retrieve the raw error list from the kernel. If the number of errors
3764 * has increased, allocate more space and continue until we get the
3765 * entire list.
3766 */
3767 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3768 &count) == 0);
3769 if (count == 0)
3770 return (0);
3771 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3772 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL)
3773 return (-1);
3774 zc.zc_nvlist_dst_size = count;
3775 (void) strcpy(zc.zc_name, zhp->zpool_name);
3776 for (;;) {
3777 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3778 &zc) != 0) {
3779 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3780 if (errno == ENOMEM) {
3781 void *dst;
3782
3783 count = zc.zc_nvlist_dst_size;
3784 dst = zfs_alloc(zhp->zpool_hdl, count *
3785 sizeof (zbookmark_phys_t));
3786 if (dst == NULL)
3787 return (-1);
3788 zc.zc_nvlist_dst = (uintptr_t)dst;
3789 } else {
3790 return (-1);
3791 }
3792 } else {
3793 break;
3794 }
3795 }
3796
3797 /*
3798 * Sort the resulting bookmarks. This is a little confusing due to the
3799 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3800 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3801 * _not_ copied as part of the process. So we point the start of our
3802 * array appropriately and decrement the total number of elements.
3803 */
3804 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3805 zc.zc_nvlist_dst_size;
3806 count -= zc.zc_nvlist_dst_size;
3807
3808 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
3809
3810 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3811
3812 /*
3813 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3814 */
3815 for (i = 0; i < count; i++) {
3816 nvlist_t *nv;
3817
3818 /* ignoring zb_blkid and zb_level for now */
3819 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3820 zb[i-1].zb_object == zb[i].zb_object)
3821 continue;
3822
3823 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3824 goto nomem;
3825 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3826 zb[i].zb_objset) != 0) {
3827 nvlist_free(nv);
3828 goto nomem;
3829 }
3830 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3831 zb[i].zb_object) != 0) {
3832 nvlist_free(nv);
3833 goto nomem;
3834 }
3835 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3836 nvlist_free(nv);
3837 goto nomem;
3838 }
3839 nvlist_free(nv);
3840 }
3841
3842 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3843 return (0);
3844
3845 nomem:
3846 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3847 return (no_memory(zhp->zpool_hdl));
3848 }
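
/*
 * Illustrative usage sketch (not part of the original source): each
 * element of the returned list is an nvlist holding ZPOOL_ERR_DATASET
 * and ZPOOL_ERR_OBJECT, which zpool_obj_to_path() below can turn into
 * a pathname.
 *
 *	nvlist_t *nverrlist = NULL, *nv;
 *	nvpair_t *elem = NULL;
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0 && nverrlist != NULL) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			(void) printf("dataset 0x%llx object 0x%llx\n",
 *			    (u_longlong_t)fnvlist_lookup_uint64(nv,
 *			    ZPOOL_ERR_DATASET),
 *			    (u_longlong_t)fnvlist_lookup_uint64(nv,
 *			    ZPOOL_ERR_OBJECT));
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */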
3849
3850 /*
3851 * Upgrade a ZFS pool to the latest on-disk version.
3852 */
3853 int
3854 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3855 {
3856 zfs_cmd_t zc = { 0 };
3857 libzfs_handle_t *hdl = zhp->zpool_hdl;
3858
3859 (void) strcpy(zc.zc_name, zhp->zpool_name);
3860 zc.zc_cookie = new_version;
3861
3862 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3863 return (zpool_standard_error_fmt(hdl, errno,
3864 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3865 zhp->zpool_name));
3866 return (0);
3867 }
3868
3869 void
3870 zfs_save_arguments(int argc, char **argv, char *string, int len)
3871 {
3872 (void) strlcpy(string, basename(argv[0]), len);
3873 for (int i = 1; i < argc; i++) {
3874 (void) strlcat(string, " ", len);
3875 (void) strlcat(string, argv[i], len);
3876 }
3877 }
3878
3879 int
3880 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3881 {
3882 zfs_cmd_t zc = { 0 };
3883 nvlist_t *args;
3884 int err;
3885
3886 args = fnvlist_alloc();
3887 fnvlist_add_string(args, "message", message);
3888 err = zcmd_write_src_nvlist(hdl, &zc, args);
3889 if (err == 0)
3890 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3891 nvlist_free(args);
3892 zcmd_free_nvlists(&zc);
3893 return (err);
3894 }
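
/*
 * Illustrative usage sketch (not part of the original source), pairing
 * zfs_save_arguments() above with zpool_log_history() the way the
 * command-line tools do at startup; HIS_MAX_RECORD_LEN is assumed to
 * come from sys/spa.h.
 *
 *	char history_str[HIS_MAX_RECORD_LEN];
 *	zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
 *	(void) zpool_log_history(hdl, history_str);
 */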
3895
3896 /*
3897 * Perform ioctl to get some command history of a pool.
3898 *
3899 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3900 * logical offset of the history buffer to start reading from.
3901 *
3902 * Upon return, 'off' is the next logical offset to read from and
3903 * 'len' is the actual amount of bytes read into 'buf'.
3904 */
3905 static int
3906 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3907 {
3908 zfs_cmd_t zc = { 0 };
3909 libzfs_handle_t *hdl = zhp->zpool_hdl;
3910
3911 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3912
3913 zc.zc_history = (uint64_t)(uintptr_t)buf;
3914 zc.zc_history_len = *len;
3915 zc.zc_history_offset = *off;
3916
3917 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3918 switch (errno) {
3919 case EPERM:
3920 return (zfs_error_fmt(hdl, EZFS_PERM,
3921 dgettext(TEXT_DOMAIN,
3922 "cannot show history for pool '%s'"),
3923 zhp->zpool_name));
3924 case ENOENT:
3925 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3926 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3927 "'%s'"), zhp->zpool_name));
3928 case ENOTSUP:
3929 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3930 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3931 "'%s', pool must be upgraded"), zhp->zpool_name));
3932 default:
3933 return (zpool_standard_error_fmt(hdl, errno,
3934 dgettext(TEXT_DOMAIN,
3935 "cannot get history for '%s'"), zhp->zpool_name));
3936 }
3937 }
3938
3939 *len = zc.zc_history_len;
3940 *off = zc.zc_history_offset;
3941
3942 return (0);
3943 }
3944
3945 /*
3946 * Process the buffer of nvlists, unpacking and storing each nvlist record
3947 * into 'records'. 'leftover' is set to the number of bytes that weren't
3948 * processed as there wasn't a complete record.
3949 */
3950 int
3951 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3952 nvlist_t ***records, uint_t *numrecords)
3953 {
3954 uint64_t reclen;
3955 nvlist_t *nv;
3956 int i;
3957
3958 while (bytes_read > sizeof (reclen)) {
3959
3960 /* get length of packed record (stored as little endian) */
3961 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3962 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3963
3964 if (bytes_read < sizeof (reclen) + reclen)
3965 break;
3966
3967 /* unpack record */
3968 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3969 return (ENOMEM);
3970 bytes_read -= sizeof (reclen) + reclen;
3971 buf += sizeof (reclen) + reclen;
3972
		/* add record to nvlist array, growing it as needed */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			/* double the array; drop this record on failure */
			nvlist_t **tmp = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
			if (tmp == NULL) {
				(*numrecords)--;
				nvlist_free(nv);
				return (ENOMEM);
			}
			*records = tmp;
		}
		(*records)[*numrecords - 1] = nv;
3980 }
3981
3982 *leftover = bytes_read;
3983 return (0);
3984 }
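
/*
 * For illustration, each record in the history buffer is laid out as:
 *
 *	| reclen: 8 bytes, little endian | packed nvlist: reclen bytes |
 *
 * so a partial record at the end of a read simply becomes 'leftover'
 * and is re-read by the next call at the adjusted offset.
 */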
3985
3986 /* from spa_history.c: spa_history_create_obj() */
3987 #define HIS_BUF_LEN_DEF (128 << 10)
3988 #define HIS_BUF_LEN_MAX (1 << 30)
3989
3990 /*
3991 * Retrieve the command history of a pool.
3992 */
3993 int
3994 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3995 {
3996 char *buf;
3997 uint64_t buflen = HIS_BUF_LEN_DEF;
3998 uint64_t off = 0;
3999 nvlist_t **records = NULL;
4000 uint_t numrecords = 0;
4001 int err, i;
4002
4003 buf = malloc(buflen);
4004 if (buf == NULL)
4005 return (ENOMEM);
4006 do {
4007 uint64_t bytes_read = buflen;
4008 uint64_t leftover;
4009
4010 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
4011 break;
4012
4013 /* if nothing else was read in, we're at EOF, just return */
4014 if (bytes_read == 0)
4015 break;
4016
4017 if ((err = zpool_history_unpack(buf, bytes_read,
4018 &leftover, &records, &numrecords)) != 0)
4019 break;
4020 off -= leftover;
4021 if (leftover == bytes_read) {
			/*
			 * No progress was made because the buffer is not
			 * big enough to hold this record; resize and retry.
			 */
4026 buflen *= 2;
4027 free(buf);
4028 buf = NULL;
4029 if ((buflen >= HIS_BUF_LEN_MAX) ||
4030 ((buf = malloc(buflen)) == NULL)) {
4031 err = ENOMEM;
4032 break;
4033 }
4034 }
4035
4036 /* CONSTCOND */
4037 } while (1);
4038
4039 free(buf);
4040
4041 if (!err) {
4042 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
4043 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
4044 records, numrecords) == 0);
4045 }
4046 for (i = 0; i < numrecords; i++)
4047 nvlist_free(records[i]);
4048 free(records);
4049
4050 return (err);
4051 }
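
/*
 * A sketch (not compiled here) of how a caller might walk the returned
 * history; this assumes CLI-originated records carry their command line
 * under ZPOOL_HIST_CMD:
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t nrec, i;
 *	char *cmd;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis,
 *		    ZPOOL_HIST_RECORD, &records, &nrec) == 0);
 *		for (i = 0; i < nrec; i++) {
 *			if (nvlist_lookup_string(records[i],
 *			    ZPOOL_HIST_CMD, &cmd) == 0)
 *				(void) printf("%s\n", cmd);
 *		}
 *		nvlist_free(nvhis);
 *	}
 */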
4052
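/*
 * Convert a (dataset, object) pair into a human-readable path for error
 * reporting: "<mountpoint>/<path>" if the dataset is mounted,
 * "<dsname>:<path>" if it is not, and raw "<0x...>" object numbers when
 * the names cannot be resolved.
 */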
4053 void
4054 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
4055 char *pathname, size_t len)
4056 {
4057 zfs_cmd_t zc = { 0 };
4058 boolean_t mounted = B_FALSE;
4059 char *mntpnt = NULL;
4060 char dsname[ZFS_MAX_DATASET_NAME_LEN];
4061
4062 if (dsobj == 0) {
4063 /* special case for the MOS */
4064 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
4065 return;
4066 }
4067
4068 /* get the dataset's name */
4069 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4070 zc.zc_obj = dsobj;
4071 if (ioctl(zhp->zpool_hdl->libzfs_fd,
4072 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
4073 /* just write out a path of two object numbers */
4074 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
4075 dsobj, obj);
4076 return;
4077 }
4078 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
4079
4080 /* find out if the dataset is mounted */
4081 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
4082
4083 /* get the corrupted object's path */
4084 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
4085 zc.zc_obj = obj;
4086 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
4087 &zc) == 0) {
4088 if (mounted) {
4089 (void) snprintf(pathname, len, "%s%s", mntpnt,
4090 zc.zc_value);
4091 } else {
4092 (void) snprintf(pathname, len, "%s:%s",
4093 dsname, zc.zc_value);
4094 }
4095 } else {
4096 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
4097 }
4098 free(mntpnt);
4099 }
4100
4101 #ifdef illumos
/*
 * Read the EFI label from the config; if a label does not exist then
 * pass the error back to the caller. If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition. If the caller has passed a non-NULL boolean argument, then
 * we set it to indicate whether the disk has an EFI system partition.
 */
4109 static int
4110 read_efi_label(nvlist_t *config, diskaddr_t *sb, boolean_t *system)
4111 {
4112 char *path;
4113 int fd;
4114 char diskname[MAXPATHLEN];
4115 boolean_t boot = B_FALSE;
4116 int err = -1;
4117 int slice;
4118
4119 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
4120 return (err);
4121
4122 (void) snprintf(diskname, sizeof (diskname), "%s%s", ZFS_RDISK_ROOT,
4123 strrchr(path, '/'));
4124 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
4125 struct dk_gpt *vtoc;
4126
4127 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
4128 for (slice = 0; slice < vtoc->efi_nparts; slice++) {
4129 if (vtoc->efi_parts[slice].p_tag == V_SYSTEM)
4130 boot = B_TRUE;
4131 if (vtoc->efi_parts[slice].p_tag == V_USR)
4132 break;
4133 }
			/* guard against no V_USR slice being found */
			if (sb != NULL && slice < vtoc->efi_nparts &&
			    vtoc->efi_parts[slice].p_tag == V_USR)
				*sb = vtoc->efi_parts[slice].p_start;
4136 if (system != NULL)
4137 *system = boot;
4138 efi_free(vtoc);
4139 }
4140 (void) close(fd);
4141 }
4142 return (err);
4143 }
4144
/*
 * Determine where a partition starts on a disk in the current
 * configuration.
 */
4149 static diskaddr_t
4150 find_start_block(nvlist_t *config)
4151 {
4152 nvlist_t **child;
4153 uint_t c, children;
4154 diskaddr_t sb = MAXOFFSET_T;
4155 uint64_t wholedisk;
4156
4157 if (nvlist_lookup_nvlist_array(config,
4158 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
4159 if (nvlist_lookup_uint64(config,
4160 ZPOOL_CONFIG_WHOLE_DISK,
4161 &wholedisk) != 0 || !wholedisk) {
4162 return (MAXOFFSET_T);
4163 }
4164 if (read_efi_label(config, &sb, NULL) < 0)
4165 sb = MAXOFFSET_T;
4166 return (sb);
4167 }
4168
4169 for (c = 0; c < children; c++) {
4170 sb = find_start_block(child[c]);
4171 if (sb != MAXOFFSET_T) {
4172 return (sb);
4173 }
4174 }
4175 return (MAXOFFSET_T);
4176 }
4177 #endif /* illumos */
4178
4179 /*
4180 * Label an individual disk. The name provided is the short name,
4181 * stripped of any leading /dev path.
4182 */
4183 int
4184 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name,
4185 zpool_boot_label_t boot_type, uint64_t boot_size, int *slice)
4186 {
4187 #ifdef illumos
4188 char path[MAXPATHLEN];
4189 struct dk_gpt *vtoc;
4190 int fd;
4191 size_t resv = EFI_MIN_RESV_SIZE;
4192 uint64_t slice_size;
4193 diskaddr_t start_block;
4194 char errbuf[1024];
4195
4196 /* prepare an error message just in case */
4197 (void) snprintf(errbuf, sizeof (errbuf),
4198 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
4199
4200 if (zhp) {
4201 nvlist_t *nvroot;
4202
4203 verify(nvlist_lookup_nvlist(zhp->zpool_config,
4204 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
4205
4206 if (zhp->zpool_start_block == 0)
4207 start_block = find_start_block(nvroot);
4208 else
4209 start_block = zhp->zpool_start_block;
4210 zhp->zpool_start_block = start_block;
4211 } else {
4212 /* new pool */
4213 start_block = NEW_START_BLOCK;
4214 }
4215
4216 (void) snprintf(path, sizeof (path), "%s/%s%s", ZFS_RDISK_ROOT, name,
4217 BACKUP_SLICE);
4218
4219 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
4220 /*
4221 * This shouldn't happen. We've long since verified that this
4222 * is a valid device.
4223 */
4224 zfs_error_aux(hdl,
4225 dgettext(TEXT_DOMAIN, "unable to open device"));
4226 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
4227 }
4228
4229 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or
		 * were unable to read the disk's capacity.
		 */
4234 if (errno == ENOMEM)
4235 (void) no_memory(hdl);
4236
4237 (void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"));
4240
4241 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
4242 }
4243
4244 /*
4245 * Why we use V_USR: V_BACKUP confuses users, and is considered
4246 * disposable by some EFI utilities (since EFI doesn't have a backup
4247 * slice). V_UNASSIGNED is supposed to be used only for zero size
4248 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
4249 * etc. were all pretty specific. V_USR is as close to reality as we
4250 * can get, in the absence of V_OTHER.
4251 */
4252 /* first fix the partition start block */
4253 if (start_block == MAXOFFSET_T)
4254 start_block = NEW_START_BLOCK;
4255
	/*
	 * When an EFI System partition is present it uses slice 0, ZFS is
	 * on slice 1, and slice 8 is reserved.  We assume that a GPT label
	 * without a system partition has the zfs partition starting at
	 * NEW_START_BLOCK, so start_block != NEW_START_BLOCK means a
	 * system partition is present.  The correct solution would be to
	 * query/cache the vtoc from an existing vdev member.
	 */
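	/*
	 * For illustration, the two layouts produced below (not to scale):
	 *
	 *	default:	| s0: zfs            | ... | s8: reserved |
	 *	with ESP:	| s0: loader | s1: zfs | ... | s8: reserved |
	 */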
4265 if (boot_type == ZPOOL_CREATE_BOOT_LABEL) {
4266 if (boot_size % vtoc->efi_lbasize != 0) {
4267 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4268 "boot partition size must be a multiple of %d"),
4269 vtoc->efi_lbasize);
4270 (void) close(fd);
4271 efi_free(vtoc);
4272 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4273 }
		/*
		 * System partition size checks.
		 * Note that 1MB is a fairly arbitrary value: since we are
		 * creating a dedicated pool it only needs to hold the FAT
		 * file system plus the EFI boot loader, and may need to be
		 * raised if the boot loader grows.
		 */
4281 if (boot_size < 1024 * 1024) {
4282 char buf[64];
4283 zfs_nicenum(boot_size, buf, sizeof (buf));
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Specified size %s for EFI System partition is too "
			    "small; the minimum size is 1MB."), buf);
4287 (void) close(fd);
4288 efi_free(vtoc);
4289 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4290 }
		/* 33MB is the smallest size verified with mkfs -F pcfs */
4292 if (hdl->libzfs_printerr &&
4293 ((vtoc->efi_lbasize == 512 &&
4294 boot_size < 33 * 1024 * 1024) ||
4295 (vtoc->efi_lbasize == 4096 &&
4296 boot_size < 256 * 1024 * 1024))) {
4297 char buf[64];
4298 zfs_nicenum(boot_size, buf, sizeof (buf));
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
			    "Warning: EFI System partition size %s is too "
			    "small to hold a FAT32 file\nsystem, which may "
			    "result in an unbootable system.\n"), buf);
4303 }
		/*
		 * Adjust the zfs partition start by the size of the system
		 * partition, e.g. a 256MB ESP on 512-byte sectors advances
		 * start_block by 524288 blocks.
		 */
4305 start_block += boot_size / vtoc->efi_lbasize;
4306 }
4307
4308 if (start_block == NEW_START_BLOCK) {
4309 /*
4310 * Use default layout.
4311 * ZFS is on slice 0 and slice 8 is reserved.
4312 */
4313 slice_size = vtoc->efi_last_u_lba + 1;
4314 slice_size -= EFI_MIN_RESV_SIZE;
4315 slice_size -= start_block;
4316 if (slice != NULL)
4317 *slice = 0;
4318
4319 vtoc->efi_parts[0].p_start = start_block;
4320 vtoc->efi_parts[0].p_size = slice_size;
4321
4322 vtoc->efi_parts[0].p_tag = V_USR;
4323 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
4324
4325 vtoc->efi_parts[8].p_start = slice_size + start_block;
4326 vtoc->efi_parts[8].p_size = resv;
4327 vtoc->efi_parts[8].p_tag = V_RESERVED;
4328 } else {
4329 slice_size = start_block - NEW_START_BLOCK;
4330 vtoc->efi_parts[0].p_start = NEW_START_BLOCK;
4331 vtoc->efi_parts[0].p_size = slice_size;
4332 vtoc->efi_parts[0].p_tag = V_SYSTEM;
4333 (void) strcpy(vtoc->efi_parts[0].p_name, "loader");
4334 if (slice != NULL)
4335 *slice = 1;
4336 /* prepare slice 1 */
4337 slice_size = vtoc->efi_last_u_lba + 1 - slice_size;
4338 slice_size -= resv;
4339 slice_size -= NEW_START_BLOCK;
4340 vtoc->efi_parts[1].p_start = start_block;
4341 vtoc->efi_parts[1].p_size = slice_size;
4342 vtoc->efi_parts[1].p_tag = V_USR;
4343 (void) strcpy(vtoc->efi_parts[1].p_name, "zfs");
4344
4345 vtoc->efi_parts[8].p_start = slice_size + start_block;
4346 vtoc->efi_parts[8].p_size = resv;
4347 vtoc->efi_parts[8].p_tag = V_RESERVED;
4348 }
4349
4350 if (efi_write(fd, vtoc) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message directing
		 * the user to manually label the disk and provide a
		 * specific slice.
		 */
4357 (void) close(fd);
4358 efi_free(vtoc);
4359
4360 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4361 "try using fdisk(1M) and then provide a specific slice"));
4362 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4363 }
4364
4365 (void) close(fd);
4366 efi_free(vtoc);
4367 #endif /* illumos */
4368 return (0);
4369 }
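
/*
 * Illustrative call (a sketch; ZPOOL_NO_BOOT_LABEL is assumed to be the
 * zpool_boot_label_t value requesting no EFI System partition, and the
 * device name is hypothetical):
 *
 *	int slice;
 *
 *	if (zpool_label_disk(hdl, NULL, "c0t0d0",
 *	    ZPOOL_NO_BOOT_LABEL, 0, &slice) == 0)
 *		(void) printf("zfs is on slice %d\n", slice);
 */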
4370
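/*
 * Recursively check whether every vdev in 'config' is of a type that can
 * back a dump device; file, hole, and missing vdevs are rejected.
 */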
4371 static boolean_t
4372 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
4373 {
4374 char *type;
4375 nvlist_t **child;
4376 uint_t children, c;
4377
4378 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
4379 if (strcmp(type, VDEV_TYPE_FILE) == 0 ||
4380 strcmp(type, VDEV_TYPE_HOLE) == 0 ||
4381 strcmp(type, VDEV_TYPE_MISSING) == 0) {
4382 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4383 "vdev type '%s' is not supported"), type);
4384 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
4385 return (B_FALSE);
4386 }
4387 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
4388 &child, &children) == 0) {
4389 for (c = 0; c < children; c++) {
4390 if (!supported_dump_vdev_type(hdl, child[c], errbuf))
4391 return (B_FALSE);
4392 }
4393 }
4394 return (B_TRUE);
4395 }
4396
/*
 * Check whether this zvol may be used as a dump device: returns zero if
 * it may, > 0 if it may not, and < 0 if 'arg' is not a zvol.
 *
 * Allowable storage configurations include mirrors, all raidz variants,
 * and pools with log, cache, and spare devices. Pools that are backed by
 * files or have missing/hole vdevs are not suitable.
 */
4405 int
4406 zvol_check_dump_config(char *arg)
4407 {
4408 zpool_handle_t *zhp = NULL;
4409 nvlist_t *config, *nvroot;
4410 char *p, *volname;
4411 nvlist_t **top;
4412 uint_t toplevels;
4413 libzfs_handle_t *hdl;
4414 char errbuf[1024];
4415 char poolname[ZFS_MAX_DATASET_NAME_LEN];
4416 int pathlen = strlen(ZVOL_FULL_DEV_DIR);
4417 int ret = 1;
4418
4419 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
4420 return (-1);
4421 }
4422
4423 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
4424 "dump is not supported on device '%s'"), arg);
4425
4426 if ((hdl = libzfs_init()) == NULL)
4427 return (1);
4428 libzfs_print_on_error(hdl, B_TRUE);
4429
4430 volname = arg + pathlen;
4431
4432 /* check the configuration of the pool */
4433 if ((p = strchr(volname, '/')) == NULL) {
4434 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4435 "malformed dataset name"));
4436 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		goto out;
4438 } else if (p - volname >= ZFS_MAX_DATASET_NAME_LEN) {
4439 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4440 "dataset name is too long"));
4441 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
		goto out;
4443 } else {
4444 (void) strncpy(poolname, volname, p - volname);
4445 poolname[p - volname] = '\0';
4446 }
4447
4448 if ((zhp = zpool_open(hdl, poolname)) == NULL) {
4449 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4450 "could not open pool '%s'"), poolname);
4451 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
4452 goto out;
4453 }
4454 config = zpool_get_config(zhp, NULL);
4455 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4456 &nvroot) != 0) {
4457 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4458 "could not obtain vdev configuration for '%s'"), poolname);
4459 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4460 goto out;
4461 }
4462
4463 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
4464 &top, &toplevels) == 0);
4465
4466 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
4467 goto out;
4468 }
4469 ret = 0;
4470
4471 out:
4472 if (zhp)
4473 zpool_close(zhp);
4474 libzfs_fini(hdl);
4475 return (ret);
4476 }
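
/*
 * Illustrative use (hypothetical device path; ZVOL_FULL_DEV_DIR is the
 * "/dev/zvol/..." prefix checked above):
 *
 *	if (zvol_check_dump_config("/dev/zvol/dsk/rpool/dump") == 0)
 *		... the zvol is usable as a dump device ...
 */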
4477
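/*
 * Pack the pool guid, device guid, and 'command' into an nvlist and hand
 * it to the kernel via ZFS_IOC_NEXTBOOT, for consumption on the next
 * boot.
 */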
4478 int
4479 zpool_nextboot(libzfs_handle_t *hdl, uint64_t pool_guid, uint64_t dev_guid,
4480 const char *command)
4481 {
4482 zfs_cmd_t zc = { 0 };
4483 nvlist_t *args;
4484 char *packed;
4485 size_t size;
4486 int error;
4487
4488 args = fnvlist_alloc();
4489 fnvlist_add_uint64(args, ZPOOL_CONFIG_POOL_GUID, pool_guid);
4490 fnvlist_add_uint64(args, ZPOOL_CONFIG_GUID, dev_guid);
4491 fnvlist_add_string(args, "command", command);
4492 error = zcmd_write_src_nvlist(hdl, &zc, args);
4493 if (error == 0)
4494 error = ioctl(hdl->libzfs_fd, ZFS_IOC_NEXTBOOT, &zc);
4495 zcmd_free_nvlists(&zc);
4496 nvlist_free(args);
4497 return (error);
4498 }
4499