1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25 * Copyright (c) 2014, 2020 by Delphix. All rights reserved.
26 * Copyright 2016 Igor Kozhukhov <[email protected]>
27 * Copyright 2017 RackTop Systems.
28 * Copyright (c) 2018 Datto Inc.
29 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
30 */
31
32 /*
33 * Routines to manage ZFS mounts. We separate all the nasty routines that have
34 * to deal with the OS. The following functions are the main entry points --
35 * they are used by mount and unmount and when changing a filesystem's
36 * mountpoint.
37 *
38 * zfs_is_mounted()
39 * zfs_mount()
40 * zfs_mount_at()
41 * zfs_unmount()
42 * zfs_unmountall()
43 *
44 * This file also contains the functions used to manage sharing filesystems via
45  * NFS and SMB:
46 *
47 * zfs_is_shared()
48 * zfs_share()
49 * zfs_unshare()
50 *
51 * zfs_is_shared_nfs()
52 * zfs_is_shared_smb()
53 * zfs_share_proto()
54  *	zfs_shareall()
55 * zfs_unshare_nfs()
56 * zfs_unshare_smb()
57 * zfs_unshareall_nfs()
58 * zfs_unshareall_smb()
59 * zfs_unshareall()
60 * zfs_unshareall_bypath()
61 *
62 * The following functions are available for pool consumers, and will
63  * mount/unmount and share/unshare all datasets within the pool:
64 *
65 * zpool_enable_datasets()
66 * zpool_disable_datasets()
67 */
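
/*
 * A minimal pool-consumer sketch (not part of this file); the libzfs handle
 * 'g_zfs' and the pool name "tank" are hypothetical, and error handling is
 * elided:
 *
 *	zpool_handle_t *zph = zpool_open(g_zfs, "tank");
 *	if (zph != NULL) {
 *		(void) zpool_enable_datasets(zph, NULL, 0);
 *		...
 *		(void) zpool_disable_datasets(zph, B_FALSE);
 *		zpool_close(zph);
 *	}
 */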
68
69 #include <dirent.h>
70 #include <dlfcn.h>
71 #include <errno.h>
72 #include <fcntl.h>
73 #include <libgen.h>
74 #include <libintl.h>
75 #include <stdio.h>
76 #include <stdlib.h>
77 #include <strings.h>
78 #include <unistd.h>
79 #include <zone.h>
80 #include <sys/mntent.h>
81 #include <sys/mount.h>
82 #include <sys/stat.h>
83 #include <sys/vfs.h>
84 #include <sys/dsl_crypt.h>
85
86 #include <libzfs.h>
87
88 #include "libzfs_impl.h"
89 #include <thread_pool.h>
90
91 #include <libshare.h>
92 #include <sys/systeminfo.h>
93 #define MAXISALEN 257 /* based on sysinfo(2) man page */
94
95 static int mount_tp_nthr = 512; /* tpool threads for multi-threaded mounting */
96
97 static void zfs_mount_task(void *);
98 zfs_share_type_t zfs_is_shared_proto(zfs_handle_t *, char **,
99 zfs_share_proto_t);
100
101 /*
102 * The share protocols table must be in the same order as the zfs_share_proto_t
103 * enum in libzfs_impl.h
104 */
105 proto_table_t proto_table[PROTO_END] = {
106 {ZFS_PROP_SHARENFS, "nfs", EZFS_SHARENFSFAILED, EZFS_UNSHARENFSFAILED},
107 {ZFS_PROP_SHARESMB, "smb", EZFS_SHARESMBFAILED, EZFS_UNSHARESMBFAILED},
108 };
109
110 zfs_share_proto_t nfs_only[] = {
111 PROTO_NFS,
112 PROTO_END
113 };
114
115 zfs_share_proto_t smb_only[] = {
116 PROTO_SMB,
117 PROTO_END
118 };
119 zfs_share_proto_t share_all_proto[] = {
120 PROTO_NFS,
121 PROTO_SMB,
122 PROTO_END
123 };
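
/*
 * The arrays above are PROTO_END-terminated so callers can walk any of them
 * with the same loop shape, e.g. (a sketch; do_something_with() stands in
 * for whatever per-protocol work the caller performs):
 *
 *	zfs_share_proto_t *p;
 *	for (p = share_all_proto; *p != PROTO_END; p++)
 *		do_something_with(proto_table[*p].p_name);
 */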
124
125
126
127 static boolean_t
128 dir_is_empty_stat(const char *dirname)
129 {
130 struct stat st;
131
132 /*
133 	 * We only want to return false if the given path is a non-empty
134 	 * directory; all other errors are handled elsewhere.
135 */
136 if (stat(dirname, &st) < 0 || !S_ISDIR(st.st_mode)) {
137 return (B_TRUE);
138 }
139
140 /*
141 * An empty directory will still have two entries in it, one
142 * entry for each of "." and "..".
143 */
144 if (st.st_size > 2) {
145 return (B_FALSE);
146 }
147
148 return (B_TRUE);
149 }
150
151 static boolean_t
152 dir_is_empty_readdir(const char *dirname)
153 {
154 DIR *dirp;
155 struct dirent64 *dp;
156 int dirfd;
157
158 if ((dirfd = openat(AT_FDCWD, dirname,
159 O_RDONLY | O_NDELAY | O_LARGEFILE | O_CLOEXEC, 0)) < 0) {
160 return (B_TRUE);
161 }
162
163 if ((dirp = fdopendir(dirfd)) == NULL) {
164 (void) close(dirfd);
165 return (B_TRUE);
166 }
167
168 while ((dp = readdir64(dirp)) != NULL) {
169
170 if (strcmp(dp->d_name, ".") == 0 ||
171 strcmp(dp->d_name, "..") == 0)
172 continue;
173
174 (void) closedir(dirp);
175 return (B_FALSE);
176 }
177
178 (void) closedir(dirp);
179 return (B_TRUE);
180 }
181
182 /*
183 * Returns true if the specified directory is empty. If we can't open the
184 * directory at all, return true so that the mount can fail with a more
185 * informative error message.
186 */
187 static boolean_t
188 dir_is_empty(const char *dirname)
189 {
190 struct statfs64 st;
191
192 /*
193 	 * If the statfs64() call fails or the filesystem is not a ZFS
194 * filesystem, fall back to the slow path which uses readdir.
195 */
196 if ((statfs64(dirname, &st) != 0) ||
197 (st.f_type != ZFS_SUPER_MAGIC)) {
198 return (dir_is_empty_readdir(dirname));
199 }
200
201 /*
202 * At this point, we know the provided path is on a ZFS
203 * filesystem, so we can use stat instead of readdir to
204 * determine if the directory is empty or not. We try to avoid
205 * using readdir because that requires opening "dirname"; this
206 * open file descriptor can potentially end up in a child
207 * process if there's a concurrent fork, thus preventing the
208 * zfs_mount() from otherwise succeeding (the open file
209 * descriptor inherited by the child process will cause the
210 	 * parent's mount to fail with EBUSY). The performance benefit
211 	 * of replacing the open, read, and close with a single stat
212 	 * is nice, but it is not the main motivation for the added
213 	 * complexity.
214 */
215 return (dir_is_empty_stat(dirname));
216 }
217
218 /*
219  * Checks to see if the mount is active. If the filesystem is mounted, we
220  * fill in 'where' with the current mountpoint and return B_TRUE. Otherwise,
221  * we return B_FALSE.
222 */
223 boolean_t
224 is_mounted(libzfs_handle_t *zfs_hdl, const char *special, char **where)
225 {
226 struct mnttab entry;
227
228 if (libzfs_mnttab_find(zfs_hdl, special, &entry) != 0)
229 return (B_FALSE);
230
231 if (where != NULL)
232 *where = zfs_strdup(zfs_hdl, entry.mnt_mountp);
233
234 return (B_TRUE);
235 }
236
237 boolean_t
238 zfs_is_mounted(zfs_handle_t *zhp, char **where)
239 {
240 return (is_mounted(zhp->zfs_hdl, zfs_get_name(zhp), where));
241 }
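
/*
 * Example caller pattern (a sketch): when 'where' is requested, the returned
 * string is allocated with zfs_strdup() and must be freed by the caller.
 *
 *	char *where = NULL;
 *	if (zfs_is_mounted(zhp, &where)) {
 *		(void) printf("mounted at %s\n", where);
 *		free(where);
 *	}
 */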
242
243 /*
244  * Checks any higher-order concerns about whether the given dataset is
245  * mountable, returning B_FALSE if it is not. zfs_is_mountable_internal()
246  * assumes that the caller has already verified the sanity of mounting the
247  * dataset at 'mountpoint' to the extent the caller cares about.
248 */
249 static boolean_t
250 zfs_is_mountable_internal(zfs_handle_t *zhp, const char *mountpoint)
251 {
252
253 if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED) &&
254 getzoneid() == GLOBAL_ZONEID)
255 return (B_FALSE);
256
257 return (B_TRUE);
258 }
259
260 /*
261 * Returns true if the given dataset is mountable, false otherwise. Returns the
262 * mountpoint in 'buf'.
263 */
264 boolean_t
265 zfs_is_mountable(zfs_handle_t *zhp, char *buf, size_t buflen,
266 zprop_source_t *source, int flags)
267 {
268 char sourceloc[MAXNAMELEN];
269 zprop_source_t sourcetype;
270
271 if (!zfs_prop_valid_for_type(ZFS_PROP_MOUNTPOINT, zhp->zfs_type,
272 B_FALSE))
273 return (B_FALSE);
274
275 verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, buf, buflen,
276 &sourcetype, sourceloc, sizeof (sourceloc), B_FALSE) == 0);
277
278 if (strcmp(buf, ZFS_MOUNTPOINT_NONE) == 0 ||
279 strcmp(buf, ZFS_MOUNTPOINT_LEGACY) == 0)
280 return (B_FALSE);
281
282 if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_OFF)
283 return (B_FALSE);
284
285 if (!zfs_is_mountable_internal(zhp, buf))
286 return (B_FALSE);
287
288 if (zfs_prop_get_int(zhp, ZFS_PROP_REDACTED) && !(flags & MS_FORCE))
289 return (B_FALSE);
290
291 if (source)
292 *source = sourcetype;
293
294 return (B_TRUE);
295 }
296
297 /*
298 * The filesystem is mounted by invoking the system mount utility rather
299 * than by the system call mount(2). This ensures that the /etc/mtab
300 * file is correctly locked for the update. Performing our own locking
301 * and /etc/mtab update requires making an unsafe assumption about how
302  * the mount utility performs its locking. Unfortunately, this also means
303  * that in the case of a mount failure we do not have the exact errno. We
304  * must make do with the return value from the mount process.
305 *
306 * In the long term a shared library called libmount is under development
307 * which provides a common API to address the locking and errno issues.
308 * Once the standard mount utility has been updated to use this library
309 * we can add an autoconf check to conditionally use it.
310 *
311 * http://www.kernel.org/pub/linux/utils/util-linux/libmount-docs/index.html
312 */
313
314 static int
315 zfs_add_option(zfs_handle_t *zhp, char *options, int len,
316 zfs_prop_t prop, char *on, char *off)
317 {
318 char *source;
319 uint64_t value;
320
321 /* Skip adding duplicate default options */
322 if ((strstr(options, on) != NULL) || (strstr(options, off) != NULL))
323 return (0);
324
325 /*
326 	 * We do not use zfs_prop_get_int() here, to ensure that our mount
327 	 * options are not influenced by the current /proc/self/mounts contents.
328 */
329 value = getprop_uint64(zhp, prop, &source);
330
331 (void) strlcat(options, ",", len);
332 (void) strlcat(options, value ? on : off, len);
333
334 return (0);
335 }
336
337 static int
338 zfs_add_options(zfs_handle_t *zhp, char *options, int len)
339 {
340 int error = 0;
341
342 error = zfs_add_option(zhp, options, len,
343 ZFS_PROP_ATIME, MNTOPT_ATIME, MNTOPT_NOATIME);
344 /*
345 	 * Don't add relatime/strictatime when atime=off; otherwise strictatime
346 	 * will force atime=on.
347 */
348 if (strstr(options, MNTOPT_NOATIME) == NULL) {
349 error = zfs_add_option(zhp, options, len,
350 ZFS_PROP_RELATIME, MNTOPT_RELATIME, MNTOPT_STRICTATIME);
351 }
352 error = error ? error : zfs_add_option(zhp, options, len,
353 ZFS_PROP_DEVICES, MNTOPT_DEVICES, MNTOPT_NODEVICES);
354 error = error ? error : zfs_add_option(zhp, options, len,
355 ZFS_PROP_EXEC, MNTOPT_EXEC, MNTOPT_NOEXEC);
356 error = error ? error : zfs_add_option(zhp, options, len,
357 ZFS_PROP_READONLY, MNTOPT_RO, MNTOPT_RW);
358 error = error ? error : zfs_add_option(zhp, options, len,
359 ZFS_PROP_SETUID, MNTOPT_SETUID, MNTOPT_NOSETUID);
360 error = error ? error : zfs_add_option(zhp, options, len,
361 ZFS_PROP_NBMAND, MNTOPT_NBMAND, MNTOPT_NONBMAND);
362
363 return (error);
364 }
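
/*
 * For example, with atime=on a dataset gets "," MNTOPT_ATIME appended here,
 * and with readonly=off it gets "," MNTOPT_RW; the remaining properties are
 * translated the same way, so the option string handed to do_mount() reflects
 * the dataset's current property values rather than /proc/self/mounts.
 */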
365
366 int
367 zfs_mount(zfs_handle_t *zhp, const char *options, int flags)
368 {
369 char mountpoint[ZFS_MAXPROPLEN];
370
371 if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL,
372 flags))
373 return (0);
374
375 return (zfs_mount_at(zhp, options, flags, mountpoint));
376 }
377
378 /*
379 * Mount the given filesystem.
380 */
381 int
382 zfs_mount_at(zfs_handle_t *zhp, const char *options, int flags,
383 const char *mountpoint)
384 {
385 struct stat buf;
386 char mntopts[MNT_LINE_MAX];
387 char overlay[ZFS_MAXPROPLEN];
388 libzfs_handle_t *hdl = zhp->zfs_hdl;
389 uint64_t keystatus;
390 int remount = 0, rc;
391
392 if (options == NULL) {
393 (void) strlcpy(mntopts, MNTOPT_DEFAULTS, sizeof (mntopts));
394 } else {
395 (void) strlcpy(mntopts, options, sizeof (mntopts));
396 }
397
398 if (strstr(mntopts, MNTOPT_REMOUNT) != NULL)
399 remount = 1;
400
401 /* Potentially duplicates some checks if invoked by zfs_mount(). */
402 if (!zfs_is_mountable_internal(zhp, mountpoint))
403 return (0);
404
405 /*
406 * If the pool is imported read-only then all mounts must be read-only
407 */
408 if (zpool_get_prop_int(zhp->zpool_hdl, ZPOOL_PROP_READONLY, NULL))
409 (void) strlcat(mntopts, "," MNTOPT_RO, sizeof (mntopts));
410
411 /*
412 * Append default mount options which apply to the mount point.
413 * This is done because under Linux (unlike Solaris) multiple mount
414 * points may reference a single super block. This means that just
415 * given a super block there is no back reference to update the per
416 * mount point options.
417 */
418 rc = zfs_add_options(zhp, mntopts, sizeof (mntopts));
419 if (rc) {
420 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
421 "default options unavailable"));
422 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
423 dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
424 mountpoint));
425 }
426
427 /*
428 * If the filesystem is encrypted the key must be loaded in order to
429 * mount. If the key isn't loaded, the MS_CRYPT flag decides whether
430 * or not we attempt to load the keys. Note: we must call
431 * zfs_refresh_properties() here since some callers of this function
432 * (most notably zpool_enable_datasets()) may implicitly load our key
433 * by loading the parent's key first.
434 */
435 if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
436 zfs_refresh_properties(zhp);
437 keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
438
439 /*
440 * If the key is unavailable and MS_CRYPT is set give the
441 * user a chance to enter the key. Otherwise just fail
442 * immediately.
443 */
444 if (keystatus == ZFS_KEYSTATUS_UNAVAILABLE) {
445 if (flags & MS_CRYPT) {
446 rc = zfs_crypto_load_key(zhp, B_FALSE, NULL);
447 if (rc)
448 return (rc);
449 } else {
450 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
451 "encryption key not loaded"));
452 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
453 dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
454 mountpoint));
455 }
456 }
457
458 }
459
460 /*
461 	 * Append the zfsutil option so the mount helper allows the mount.
462 */
463 strlcat(mntopts, "," MNTOPT_ZFSUTIL, sizeof (mntopts));
464
465 /* Create the directory if it doesn't already exist */
466 if (lstat(mountpoint, &buf) != 0) {
467 if (mkdirp(mountpoint, 0755) != 0) {
468 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
469 "failed to create mountpoint: %s"),
470 strerror(errno));
471 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
472 dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
473 mountpoint));
474 }
475 }
476
477 /*
478 * Overlay mounts are enabled by default but may be disabled
479 * via the 'overlay' property. The -O flag remains for compatibility.
480 */
481 if (!(flags & MS_OVERLAY)) {
482 if (zfs_prop_get(zhp, ZFS_PROP_OVERLAY, overlay,
483 sizeof (overlay), NULL, NULL, 0, B_FALSE) == 0) {
484 if (strcmp(overlay, "on") == 0) {
485 flags |= MS_OVERLAY;
486 }
487 }
488 }
489
490 /*
491 	 * Determine if the mountpoint is empty. If it is not, refuse to
492 	 * perform the mount. We don't perform this check if 'remount' is
493 	 * specified or if the overlay option (-O) is given.
494 */
495 if ((flags & MS_OVERLAY) == 0 && !remount &&
496 !dir_is_empty(mountpoint)) {
497 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
498 "directory is not empty"));
499 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
500 dgettext(TEXT_DOMAIN, "cannot mount '%s'"), mountpoint));
501 }
502
503 /* perform the mount */
504 rc = do_mount(zhp, mountpoint, mntopts, flags);
505 if (rc) {
506 /*
507 * Generic errors are nasty, but there are just way too many
508 * from mount(), and they're well-understood. We pick a few
509 * common ones to improve upon.
510 */
511 if (rc == EBUSY) {
512 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
513 "mountpoint or dataset is busy"));
514 } else if (rc == EPERM) {
515 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
516 "Insufficient privileges"));
517 } else if (rc == ENOTSUP) {
518 char buf[256];
519 int spa_version;
520
521 VERIFY(zfs_spa_version(zhp, &spa_version) == 0);
522 (void) snprintf(buf, sizeof (buf),
523 dgettext(TEXT_DOMAIN, "Can't mount a version %lld "
524 "file system on a version %d pool. Pool must be"
525 " upgraded to mount this file system."),
526 (u_longlong_t)zfs_prop_get_int(zhp,
527 ZFS_PROP_VERSION), spa_version);
528 			zfs_error_aux(hdl, "%s", buf);
529 } else {
530 zfs_error_aux(hdl, strerror(rc));
531 }
532 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
533 dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
534 zhp->zfs_name));
535 }
536
537 /* remove the mounted entry before re-adding on remount */
538 if (remount)
539 libzfs_mnttab_remove(hdl, zhp->zfs_name);
540
541 /* add the mounted entry into our cache */
542 libzfs_mnttab_add(hdl, zfs_get_name(zhp), mountpoint, mntopts);
543 return (0);
544 }
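
/*
 * Example (a sketch): mount a dataset read-only at a caller-chosen path.
 * The path "/mnt/inspect" is hypothetical and 'hdl' is the caller's
 * libzfs_handle_t.
 *
 *	if (zfs_mount_at(zhp, MNTOPT_RO, 0, "/mnt/inspect") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */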
545
546 /*
547 * Unmount a single filesystem.
548 */
549 static int
550 unmount_one(libzfs_handle_t *hdl, const char *mountpoint, int flags)
551 {
552 int error;
553
554 error = do_unmount(mountpoint, flags);
555 if (error != 0) {
556 return (zfs_error_fmt(hdl, EZFS_UMOUNTFAILED,
557 dgettext(TEXT_DOMAIN, "cannot unmount '%s'"),
558 mountpoint));
559 }
560
561 return (0);
562 }
563
564 /*
565 * Unmount the given filesystem.
566 */
567 int
568 zfs_unmount(zfs_handle_t *zhp, const char *mountpoint, int flags)
569 {
570 libzfs_handle_t *hdl = zhp->zfs_hdl;
571 struct mnttab entry;
572 char *mntpt = NULL;
573 boolean_t encroot, unmounted = B_FALSE;
574
575 /* check to see if we need to unmount the filesystem */
576 if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
577 libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0)) {
578 /*
579 		 * If 'mountpoint' isn't NULL it may have come from a call
580 		 * to getmnt/getmntany. If it is NULL, it comes from
581 		 * libzfs_mnttab_find(), whose result may be freed later.
582 		 * We strdup it to be safe.
583 */
584 if (mountpoint == NULL)
585 mntpt = zfs_strdup(hdl, entry.mnt_mountp);
586 else
587 mntpt = zfs_strdup(hdl, mountpoint);
588
589 /*
590 * Unshare and unmount the filesystem
591 */
592 if (zfs_unshare_proto(zhp, mntpt, share_all_proto) != 0) {
593 free(mntpt);
594 return (-1);
595 }
596 zfs_commit_all_shares();
597
598 if (unmount_one(hdl, mntpt, flags) != 0) {
599 free(mntpt);
600 (void) zfs_shareall(zhp);
601 zfs_commit_all_shares();
602 return (-1);
603 }
604
605 libzfs_mnttab_remove(hdl, zhp->zfs_name);
606 free(mntpt);
607 unmounted = B_TRUE;
608 }
609
610 /*
611 * If the MS_CRYPT flag is provided we must ensure we attempt to
612 * unload the dataset's key regardless of whether we did any work
613 * to unmount it. We only do this for encryption roots.
614 */
615 if ((flags & MS_CRYPT) != 0 &&
616 zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
617 zfs_refresh_properties(zhp);
618
619 if (zfs_crypto_get_encryption_root(zhp, &encroot, NULL) != 0 &&
620 unmounted) {
621 (void) zfs_mount(zhp, NULL, 0);
622 return (-1);
623 }
624
625 if (encroot && zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
626 ZFS_KEYSTATUS_AVAILABLE &&
627 zfs_crypto_unload_key(zhp) != 0) {
628 (void) zfs_mount(zhp, NULL, 0);
629 return (-1);
630 }
631 }
632
633 return (0);
634 }
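
/*
 * Example (a sketch): forcibly unmount a dataset from its current mountpoint
 * and, if it is an encryption root, also unload its key.
 *
 *	(void) zfs_unmount(zhp, NULL, MS_FORCE | MS_CRYPT);
 */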
635
636 /*
637 * Unmount this filesystem and any children inheriting the mountpoint property.
638 * To do this, just act like we're changing the mountpoint property, but don't
639 * remount the filesystems afterwards.
640 */
641 int
642 zfs_unmountall(zfs_handle_t *zhp, int flags)
643 {
644 prop_changelist_t *clp;
645 int ret;
646
647 clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT,
648 CL_GATHER_ITER_MOUNTED, flags);
649 if (clp == NULL)
650 return (-1);
651
652 ret = changelist_prefix(clp);
653 changelist_free(clp);
654
655 return (ret);
656 }
657
658 boolean_t
659 zfs_is_shared(zfs_handle_t *zhp)
660 {
661 zfs_share_type_t rc = 0;
662 zfs_share_proto_t *curr_proto;
663
664 if (ZFS_IS_VOLUME(zhp))
665 return (B_FALSE);
666
667 for (curr_proto = share_all_proto; *curr_proto != PROTO_END;
668 curr_proto++)
669 rc |= zfs_is_shared_proto(zhp, NULL, *curr_proto);
670
671 return (rc ? B_TRUE : B_FALSE);
672 }
673
674 /*
675 * Unshare a filesystem by mountpoint.
676 */
677 int
678 unshare_one(libzfs_handle_t *hdl, const char *name, const char *mountpoint,
679 zfs_share_proto_t proto)
680 {
681 int err;
682
683 err = sa_disable_share(mountpoint, proto_table[proto].p_name);
684 if (err != SA_OK) {
685 return (zfs_error_fmt(hdl, proto_table[proto].p_unshare_err,
686 dgettext(TEXT_DOMAIN, "cannot unshare '%s': %s"),
687 name, sa_errorstr(err)));
688 }
689 return (0);
690 }
691
692 /*
693 * Query libshare for the given mountpoint and protocol, returning
694 * a zfs_share_type_t value.
695 */
696 zfs_share_type_t
697 is_shared(const char *mountpoint, zfs_share_proto_t proto)
698 {
699 if (sa_is_shared(mountpoint, proto_table[proto].p_name)) {
700 switch (proto) {
701 case PROTO_NFS:
702 return (SHARED_NFS);
703 case PROTO_SMB:
704 return (SHARED_SMB);
705 default:
706 return (SHARED_NOT_SHARED);
707 }
708 }
709 return (SHARED_NOT_SHARED);
710 }
711
712 /*
713 * Share the given filesystem according to the options in the specified
714  * protocol-specific properties (sharenfs, sharesmb). We rely
715 * on "libshare" to do the dirty work for us.
716 */
717 int
718 zfs_share_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto)
719 {
720 char mountpoint[ZFS_MAXPROPLEN];
721 char shareopts[ZFS_MAXPROPLEN];
722 char sourcestr[ZFS_MAXPROPLEN];
723 zfs_share_proto_t *curr_proto;
724 zprop_source_t sourcetype;
725 int err = 0;
726
727 if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL, 0))
728 return (0);
729
730 for (curr_proto = proto; *curr_proto != PROTO_END; curr_proto++) {
731 /*
732 * Return success if there are no share options.
733 */
734 if (zfs_prop_get(zhp, proto_table[*curr_proto].p_prop,
735 shareopts, sizeof (shareopts), &sourcetype, sourcestr,
736 ZFS_MAXPROPLEN, B_FALSE) != 0 ||
737 strcmp(shareopts, "off") == 0)
738 continue;
739
740 /*
741 * If the 'zoned' property is set, then zfs_is_mountable()
742 * will have already bailed out if we are in the global zone.
743 * But local zones cannot be NFS servers, so we ignore it for
744 * local zones as well.
745 */
746 if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED))
747 continue;
748
749 err = sa_enable_share(zfs_get_name(zhp), mountpoint, shareopts,
750 proto_table[*curr_proto].p_name);
751 if (err != SA_OK) {
752 return (zfs_error_fmt(zhp->zfs_hdl,
753 proto_table[*curr_proto].p_share_err,
754 			    dgettext(TEXT_DOMAIN, "cannot share '%s': %s"),
755 zfs_get_name(zhp), sa_errorstr(err)));
756 }
757
758 }
759 return (0);
760 }
761
762 int
763 zfs_share(zfs_handle_t *zhp)
764 {
765 assert(!ZFS_IS_VOLUME(zhp));
766 return (zfs_share_proto(zhp, share_all_proto));
767 }
768
769 int
770 zfs_unshare(zfs_handle_t *zhp)
771 {
772 assert(!ZFS_IS_VOLUME(zhp));
773 return (zfs_unshareall(zhp));
774 }
775
776 /*
777 * Check to see if the filesystem is currently shared.
778 */
779 zfs_share_type_t
780 zfs_is_shared_proto(zfs_handle_t *zhp, char **where, zfs_share_proto_t proto)
781 {
782 char *mountpoint;
783 zfs_share_type_t rc;
784
785 if (!zfs_is_mounted(zhp, &mountpoint))
786 return (SHARED_NOT_SHARED);
787
788 if ((rc = is_shared(mountpoint, proto))
789 != SHARED_NOT_SHARED) {
790 if (where != NULL)
791 *where = mountpoint;
792 else
793 free(mountpoint);
794 return (rc);
795 } else {
796 free(mountpoint);
797 return (SHARED_NOT_SHARED);
798 }
799 }
800
801 boolean_t
802 zfs_is_shared_nfs(zfs_handle_t *zhp, char **where)
803 {
804 return (zfs_is_shared_proto(zhp, where,
805 PROTO_NFS) != SHARED_NOT_SHARED);
806 }
807
808 boolean_t
809 zfs_is_shared_smb(zfs_handle_t *zhp, char **where)
810 {
811 return (zfs_is_shared_proto(zhp, where,
812 PROTO_SMB) != SHARED_NOT_SHARED);
813 }
814
815 /*
816 * zfs_parse_options(options, proto)
817 *
818  * Validate the given protocol-specific share options via libshare,
819  * without actually creating or modifying any share.
820 */
821 int
822 zfs_parse_options(char *options, zfs_share_proto_t proto)
823 {
824 return (sa_validate_shareopts(options, proto_table[proto].p_name));
825 }
826
827 void
828 zfs_commit_proto(zfs_share_proto_t *proto)
829 {
830 zfs_share_proto_t *curr_proto;
831 for (curr_proto = proto; *curr_proto != PROTO_END; curr_proto++) {
832 sa_commit_shares(proto_table[*curr_proto].p_name);
833 }
834 }
835
836 void
837 zfs_commit_nfs_shares(void)
838 {
839 zfs_commit_proto(nfs_only);
840 }
841
842 void
843 zfs_commit_smb_shares(void)
844 {
845 zfs_commit_proto(smb_only);
846 }
847
848 void
849 zfs_commit_all_shares(void)
850 {
851 zfs_commit_proto(share_all_proto);
852 }
853
854 void
855 zfs_commit_shares(const char *proto)
856 {
857 if (proto == NULL)
858 zfs_commit_proto(share_all_proto);
859 else if (strcmp(proto, "nfs") == 0)
860 zfs_commit_proto(nfs_only);
861 else if (strcmp(proto, "smb") == 0)
862 zfs_commit_proto(smb_only);
863 }
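
/*
 * With the libshare interfaces used here, sharing is a two-step operation:
 * shares are first enabled and then committed. A typical caller outside this
 * file would do (a sketch):
 *
 *	if (zfs_share(zhp) == 0)
 *		zfs_commit_all_shares();
 */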
864
865 int
866 zfs_share_nfs(zfs_handle_t *zhp)
867 {
868 return (zfs_share_proto(zhp, nfs_only));
869 }
870
871 int
872 zfs_share_smb(zfs_handle_t *zhp)
873 {
874 return (zfs_share_proto(zhp, smb_only));
875 }
876
877 int
878 zfs_shareall(zfs_handle_t *zhp)
879 {
880 return (zfs_share_proto(zhp, share_all_proto));
881 }
882
883 /*
884 * Unshare the given filesystem.
885 */
886 int
887 zfs_unshare_proto(zfs_handle_t *zhp, const char *mountpoint,
888 zfs_share_proto_t *proto)
889 {
890 libzfs_handle_t *hdl = zhp->zfs_hdl;
891 struct mnttab entry;
892 char *mntpt = NULL;
893
894 	/* check to see if we need to unshare the filesystem */
895 if (mountpoint != NULL)
896 mntpt = zfs_strdup(hdl, mountpoint);
897
898 if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
899 libzfs_mnttab_find(hdl, zfs_get_name(zhp), &entry) == 0)) {
900 zfs_share_proto_t *curr_proto;
901
902 if (mountpoint == NULL)
903 mntpt = zfs_strdup(zhp->zfs_hdl, entry.mnt_mountp);
904
905 for (curr_proto = proto; *curr_proto != PROTO_END;
906 curr_proto++) {
907
908 if (is_shared(mntpt, *curr_proto)) {
909 if (unshare_one(hdl, zhp->zfs_name,
910 mntpt, *curr_proto) != 0) {
911 if (mntpt != NULL)
912 free(mntpt);
913 return (-1);
914 }
915 }
916 }
917 }
918 if (mntpt != NULL)
919 free(mntpt);
920
921 return (0);
922 }
923
924 int
925 zfs_unshare_nfs(zfs_handle_t *zhp, const char *mountpoint)
926 {
927 return (zfs_unshare_proto(zhp, mountpoint, nfs_only));
928 }
929
930 int
931 zfs_unshare_smb(zfs_handle_t *zhp, const char *mountpoint)
932 {
933 return (zfs_unshare_proto(zhp, mountpoint, smb_only));
934 }
935
936 /*
937 * Same as zfs_unmountall(), but for NFS and SMB unshares.
938 */
939 static int
940 zfs_unshareall_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto)
941 {
942 prop_changelist_t *clp;
943 int ret;
944
945 clp = changelist_gather(zhp, ZFS_PROP_SHARENFS, 0, 0);
946 if (clp == NULL)
947 return (-1);
948
949 ret = changelist_unshare(clp, proto);
950 changelist_free(clp);
951
952 return (ret);
953 }
954
955 int
956 zfs_unshareall_nfs(zfs_handle_t *zhp)
957 {
958 return (zfs_unshareall_proto(zhp, nfs_only));
959 }
960
961 int
962 zfs_unshareall_smb(zfs_handle_t *zhp)
963 {
964 return (zfs_unshareall_proto(zhp, smb_only));
965 }
966
967 int
968 zfs_unshareall(zfs_handle_t *zhp)
969 {
970 return (zfs_unshareall_proto(zhp, share_all_proto));
971 }
972
973 int
974 zfs_unshareall_bypath(zfs_handle_t *zhp, const char *mountpoint)
975 {
976 return (zfs_unshare_proto(zhp, mountpoint, share_all_proto));
977 }
978
979 int
980 zfs_unshareall_bytype(zfs_handle_t *zhp, const char *mountpoint,
981 const char *proto)
982 {
983 if (proto == NULL)
984 return (zfs_unshare_proto(zhp, mountpoint, share_all_proto));
985 if (strcmp(proto, "nfs") == 0)
986 return (zfs_unshare_proto(zhp, mountpoint, nfs_only));
987 else if (strcmp(proto, "smb") == 0)
988 return (zfs_unshare_proto(zhp, mountpoint, smb_only));
989 else
990 return (1);
991 }
992
993 /*
994 * Remove the mountpoint associated with the current dataset, if necessary.
995 * We only remove the underlying directory if:
996 *
997 * - The mountpoint is not 'none' or 'legacy'
998  *	- The mountpoint directory is empty
999 * - The mountpoint is the default or inherited
1000 * - The 'zoned' property is set, or we're in a local zone
1001 *
1002 * Any other directories we leave alone.
1003 */
1004 void
1005 remove_mountpoint(zfs_handle_t *zhp)
1006 {
1007 char mountpoint[ZFS_MAXPROPLEN];
1008 zprop_source_t source;
1009
1010 if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint),
1011 &source, 0))
1012 return;
1013
1014 if (source == ZPROP_SRC_DEFAULT ||
1015 source == ZPROP_SRC_INHERITED) {
1016 /*
1017 * Try to remove the directory, silently ignoring any errors.
1018 * The filesystem may have since been removed or moved around,
1019 * and this error isn't really useful to the administrator in
1020 * any way.
1021 */
1022 (void) rmdir(mountpoint);
1023 }
1024 }
1025
1026 /*
1027 * Add the given zfs handle to the cb_handles array, dynamically reallocating
1028 * the array if it is out of space.
1029 */
1030 void
1031 libzfs_add_handle(get_all_cb_t *cbp, zfs_handle_t *zhp)
1032 {
1033 if (cbp->cb_alloc == cbp->cb_used) {
1034 size_t newsz;
1035 zfs_handle_t **newhandles;
1036
1037 newsz = cbp->cb_alloc != 0 ? cbp->cb_alloc * 2 : 64;
1038 newhandles = zfs_realloc(zhp->zfs_hdl,
1039 cbp->cb_handles, cbp->cb_alloc * sizeof (zfs_handle_t *),
1040 newsz * sizeof (zfs_handle_t *));
1041 cbp->cb_handles = newhandles;
1042 cbp->cb_alloc = newsz;
1043 }
1044 cbp->cb_handles[cbp->cb_used++] = zhp;
1045 }
1046
1047 /*
1048 * Recursive helper function used during file system enumeration
1049 */
1050 static int
1051 zfs_iter_cb(zfs_handle_t *zhp, void *data)
1052 {
1053 get_all_cb_t *cbp = data;
1054
1055 if (!(zfs_get_type(zhp) & ZFS_TYPE_FILESYSTEM)) {
1056 zfs_close(zhp);
1057 return (0);
1058 }
1059
1060 if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_NOAUTO) {
1061 zfs_close(zhp);
1062 return (0);
1063 }
1064
1065 if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
1066 ZFS_KEYSTATUS_UNAVAILABLE) {
1067 zfs_close(zhp);
1068 return (0);
1069 }
1070
1071 /*
1072 * If this filesystem is inconsistent and has a receive resume
1073 * token, we can not mount it.
1074 */
1075 if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) &&
1076 zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
1077 NULL, 0, NULL, NULL, 0, B_TRUE) == 0) {
1078 zfs_close(zhp);
1079 return (0);
1080 }
1081
1082 libzfs_add_handle(cbp, zhp);
1083 if (zfs_iter_filesystems(zhp, zfs_iter_cb, cbp) != 0) {
1084 zfs_close(zhp);
1085 return (-1);
1086 }
1087 return (0);
1088 }
1089
1090 /*
1091 * Sort comparator that compares two mountpoint paths. We sort these paths so
1092 * that subdirectories immediately follow their parents. This means that we
1093 * effectively treat the '/' character as the lowest value non-nul char.
1094 * Since filesystems from non-global zones can have the same mountpoint
1095 * as other filesystems, the comparator sorts global zone filesystems to
1096 * the top of the list. This means that the global zone will traverse the
1097 * filesystem list in the correct order and can stop when it sees the
1098 * first zoned filesystem. In a non-global zone, only the delegated
1099 * filesystems are seen.
1100 *
1101 * An example sorted list using this comparator would look like:
1102 *
1103 * /foo
1104 * /foo/bar
1105 * /foo/bar/baz
1106 * /foo/baz
1107 * /foo.bar
1108 * /foo (NGZ1)
1109 * /foo (NGZ2)
1110 *
1111 * The mounting code depends on this ordering to deterministically iterate
1112 * over filesystems in order to spawn parallel mount tasks.
1113 */
1114 static int
1115 mountpoint_cmp(const void *arga, const void *argb)
1116 {
1117 zfs_handle_t *const *zap = arga;
1118 zfs_handle_t *za = *zap;
1119 zfs_handle_t *const *zbp = argb;
1120 zfs_handle_t *zb = *zbp;
1121 char mounta[MAXPATHLEN];
1122 char mountb[MAXPATHLEN];
1123 const char *a = mounta;
1124 const char *b = mountb;
1125 boolean_t gota, gotb;
1126 uint64_t zoneda, zonedb;
1127
1128 zoneda = zfs_prop_get_int(za, ZFS_PROP_ZONED);
1129 zonedb = zfs_prop_get_int(zb, ZFS_PROP_ZONED);
1130 if (zoneda && !zonedb)
1131 return (1);
1132 if (!zoneda && zonedb)
1133 return (-1);
1134
1135 gota = (zfs_get_type(za) == ZFS_TYPE_FILESYSTEM);
1136 if (gota) {
1137 verify(zfs_prop_get(za, ZFS_PROP_MOUNTPOINT, mounta,
1138 sizeof (mounta), NULL, NULL, 0, B_FALSE) == 0);
1139 }
1140 gotb = (zfs_get_type(zb) == ZFS_TYPE_FILESYSTEM);
1141 if (gotb) {
1142 verify(zfs_prop_get(zb, ZFS_PROP_MOUNTPOINT, mountb,
1143 sizeof (mountb), NULL, NULL, 0, B_FALSE) == 0);
1144 }
1145
1146 if (gota && gotb) {
1147 while (*a != '\0' && (*a == *b)) {
1148 a++;
1149 b++;
1150 }
1151 if (*a == *b)
1152 return (0);
1153 if (*a == '\0')
1154 return (-1);
1155 if (*b == '\0')
1156 return (1);
1157 if (*a == '/')
1158 return (-1);
1159 if (*b == '/')
1160 return (1);
1161 return (*a < *b ? -1 : *a > *b);
1162 }
1163
1164 if (gota)
1165 return (-1);
1166 if (gotb)
1167 return (1);
1168
1169 /*
1170 * If neither filesystem has a mountpoint, revert to sorting by
1171 * dataset name.
1172 */
1173 return (strcmp(zfs_get_name(za), zfs_get_name(zb)));
1174 }
1175
1176 /*
1177 * Return true if path2 is a child of path1 or path2 equals path1 or
1178 * path1 is "/" (path2 is always a child of "/").
1179 */
1180 static boolean_t
1181 libzfs_path_contains(const char *path1, const char *path2)
1182 {
1183 return (strcmp(path1, path2) == 0 || strcmp(path1, "/") == 0 ||
1184 (strstr(path2, path1) == path2 && path2[strlen(path1)] == '/'));
1185 }
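
/*
 * For example, libzfs_path_contains("/a", "/a/b") and
 * libzfs_path_contains("/", "/a") are both true, while
 * libzfs_path_contains("/a", "/ab") is false.
 */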
1186
1187 /*
1188 * Given a mountpoint specified by idx in the handles array, find the first
1189  * non-descendant of that mountpoint and return its index. Descendant paths
1190 * start with the parent's path. This function relies on the ordering
1191 * enforced by mountpoint_cmp().
1192 */
1193 static int
1194 non_descendant_idx(zfs_handle_t **handles, size_t num_handles, int idx)
1195 {
1196 char parent[ZFS_MAXPROPLEN];
1197 char child[ZFS_MAXPROPLEN];
1198 int i;
1199
1200 verify(zfs_prop_get(handles[idx], ZFS_PROP_MOUNTPOINT, parent,
1201 sizeof (parent), NULL, NULL, 0, B_FALSE) == 0);
1202
1203 for (i = idx + 1; i < num_handles; i++) {
1204 verify(zfs_prop_get(handles[i], ZFS_PROP_MOUNTPOINT, child,
1205 sizeof (child), NULL, NULL, 0, B_FALSE) == 0);
1206 if (!libzfs_path_contains(parent, child))
1207 break;
1208 }
1209 return (i);
1210 }
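
/*
 * For example, if the (already sorted) handles have the mountpoints
 * "/a", "/a/b", "/a/c", "/d", then non_descendant_idx(handles, 4, 0)
 * returns 3, the index of "/d", which is the first entry that is not
 * mounted underneath "/a".
 */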
1211
1212 typedef struct mnt_param {
1213 libzfs_handle_t *mnt_hdl;
1214 tpool_t *mnt_tp;
1215 zfs_handle_t **mnt_zhps; /* filesystems to mount */
1216 size_t mnt_num_handles;
1217 int mnt_idx; /* Index of selected entry to mount */
1218 zfs_iter_f mnt_func;
1219 void *mnt_data;
1220 } mnt_param_t;
1221
1222 /*
1223 * Allocate and populate the parameter struct for mount function, and
1224 * schedule mounting of the entry selected by idx.
1225 */
1226 static void
1227 zfs_dispatch_mount(libzfs_handle_t *hdl, zfs_handle_t **handles,
1228 size_t num_handles, int idx, zfs_iter_f func, void *data, tpool_t *tp)
1229 {
1230 mnt_param_t *mnt_param = zfs_alloc(hdl, sizeof (mnt_param_t));
1231
1232 mnt_param->mnt_hdl = hdl;
1233 mnt_param->mnt_tp = tp;
1234 mnt_param->mnt_zhps = handles;
1235 mnt_param->mnt_num_handles = num_handles;
1236 mnt_param->mnt_idx = idx;
1237 mnt_param->mnt_func = func;
1238 mnt_param->mnt_data = data;
1239
1240 (void) tpool_dispatch(tp, zfs_mount_task, (void*)mnt_param);
1241 }
1242
1243 /*
1244 * This is the structure used to keep state of mounting or sharing operations
1245 * during a call to zpool_enable_datasets().
1246 */
1247 typedef struct mount_state {
1248 /*
1249 * ms_mntstatus is set to -1 if any mount fails. While multiple threads
1250 * could update this variable concurrently, no synchronization is
1251 * needed as it's only ever set to -1.
1252 */
1253 int ms_mntstatus;
1254 int ms_mntflags;
1255 const char *ms_mntopts;
1256 } mount_state_t;
1257
1258 static int
1259 zfs_mount_one(zfs_handle_t *zhp, void *arg)
1260 {
1261 mount_state_t *ms = arg;
1262 int ret = 0;
1263
1264 /*
1265 * don't attempt to mount encrypted datasets with
1266 * unloaded keys
1267 */
1268 if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
1269 ZFS_KEYSTATUS_UNAVAILABLE)
1270 return (0);
1271
1272 if (zfs_mount(zhp, ms->ms_mntopts, ms->ms_mntflags) != 0)
1273 ret = ms->ms_mntstatus = -1;
1274 return (ret);
1275 }
1276
1277 static int
1278 zfs_share_one(zfs_handle_t *zhp, void *arg)
1279 {
1280 mount_state_t *ms = arg;
1281 int ret = 0;
1282
1283 if (zfs_share(zhp) != 0)
1284 ret = ms->ms_mntstatus = -1;
1285 return (ret);
1286 }
1287
1288 /*
1289 * Thread pool function to mount one file system. On completion, it finds and
1290 * schedules its children to be mounted. This depends on the sorting done in
1291 * zfs_foreach_mountpoint(). Note that the degenerate case (chain of entries
1292 * each descending from the previous) will have no parallelism since we always
1293 * have to wait for the parent to finish mounting before we can schedule
1294 * its children.
1295 */
1296 static void
1297 zfs_mount_task(void *arg)
1298 {
1299 mnt_param_t *mp = arg;
1300 int idx = mp->mnt_idx;
1301 zfs_handle_t **handles = mp->mnt_zhps;
1302 size_t num_handles = mp->mnt_num_handles;
1303 char mountpoint[ZFS_MAXPROPLEN];
1304
1305 verify(zfs_prop_get(handles[idx], ZFS_PROP_MOUNTPOINT, mountpoint,
1306 sizeof (mountpoint), NULL, NULL, 0, B_FALSE) == 0);
1307
1308 if (mp->mnt_func(handles[idx], mp->mnt_data) != 0)
1309 return;
1310
1311 /*
1312 * We dispatch tasks to mount filesystems with mountpoints underneath
1313 * this one. We do this by dispatching the next filesystem with a
1314 * descendant mountpoint of the one we just mounted, then skip all of
1315 * its descendants, dispatch the next descendant mountpoint, and so on.
1316 * The non_descendant_idx() function skips over filesystems that are
1317 * descendants of the filesystem we just dispatched.
1318 */
1319 for (int i = idx + 1; i < num_handles;
1320 i = non_descendant_idx(handles, num_handles, i)) {
1321 char child[ZFS_MAXPROPLEN];
1322 verify(zfs_prop_get(handles[i], ZFS_PROP_MOUNTPOINT,
1323 child, sizeof (child), NULL, NULL, 0, B_FALSE) == 0);
1324
1325 if (!libzfs_path_contains(mountpoint, child))
1326 break; /* not a descendant, return */
1327 zfs_dispatch_mount(mp->mnt_hdl, handles, num_handles, i,
1328 mp->mnt_func, mp->mnt_data, mp->mnt_tp);
1329 }
1330 free(mp);
1331 }
1332
1333 /*
1334 * Issue the func callback for each ZFS handle contained in the handles
1335 * array. This function is used to mount all datasets, and so this function
1336 * guarantees that filesystems for parent mountpoints are called before their
1337 * children. As such, before issuing any callbacks, we first sort the array
1338 * of handles by mountpoint.
1339 *
1340 * Callbacks are issued in one of two ways:
1341 *
1342 * 1. Sequentially: If the parallel argument is B_FALSE or the ZFS_SERIAL_MOUNT
1343 * environment variable is set, then we issue callbacks sequentially.
1344 *
1345 * 2. In parallel: If the parallel argument is B_TRUE and the ZFS_SERIAL_MOUNT
1346 * environment variable is not set, then we use a tpool to dispatch threads
1347 * to mount filesystems in parallel. This function dispatches tasks to mount
1348 * the filesystems at the top-level mountpoints, and these tasks in turn
1349 * are responsible for recursively mounting filesystems in their children
1350 * mountpoints.
1351 */
1352 void
1353 zfs_foreach_mountpoint(libzfs_handle_t *hdl, zfs_handle_t **handles,
1354 size_t num_handles, zfs_iter_f func, void *data, boolean_t parallel)
1355 {
1356 zoneid_t zoneid = getzoneid();
1357
1358 /*
1359 * The ZFS_SERIAL_MOUNT environment variable is an undocumented
1360 * variable that can be used as a convenience to do a/b comparison
1361 * of serial vs. parallel mounting.
1362 */
1363 boolean_t serial_mount = !parallel ||
1364 (getenv("ZFS_SERIAL_MOUNT") != NULL);
1365
1366 /*
1367 * Sort the datasets by mountpoint. See mountpoint_cmp for details
1368 * of how these are sorted.
1369 */
1370 qsort(handles, num_handles, sizeof (zfs_handle_t *), mountpoint_cmp);
1371
1372 if (serial_mount) {
1373 for (int i = 0; i < num_handles; i++) {
1374 func(handles[i], data);
1375 }
1376 return;
1377 }
1378
1379 /*
1380 * Issue the callback function for each dataset using a parallel
1381 * algorithm that uses a thread pool to manage threads.
1382 */
1383 tpool_t *tp = tpool_create(1, mount_tp_nthr, 0, NULL);
1384
1385 /*
1386 * There may be multiple "top level" mountpoints outside of the pool's
1387 * root mountpoint, e.g.: /foo /bar. Dispatch a mount task for each of
1388 * these.
1389 */
1390 for (int i = 0; i < num_handles;
1391 i = non_descendant_idx(handles, num_handles, i)) {
1392 /*
1393 * Since the mountpoints have been sorted so that the zoned
1394 * filesystems are at the end, a zoned filesystem seen from
1395 * the global zone means that we're done.
1396 */
1397 if (zoneid == GLOBAL_ZONEID &&
1398 zfs_prop_get_int(handles[i], ZFS_PROP_ZONED))
1399 break;
1400 zfs_dispatch_mount(hdl, handles, num_handles, i, func, data,
1401 tp);
1402 }
1403
1404 tpool_wait(tp); /* wait for all scheduled mounts to complete */
1405 tpool_destroy(tp);
1406 }
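
/*
 * Usage sketch (mirroring zpool_enable_datasets() below): gather handles
 * with libzfs_add_handle()/zfs_iter_cb(), then mount them in parallel, with
 * ms.ms_mntopts and ms.ms_mntflags filled in as needed.
 *
 *	get_all_cb_t cb = { 0 };
 *	mount_state_t ms = { 0 };
 *
 *	libzfs_add_handle(&cb, zfsp);
 *	(void) zfs_iter_filesystems(zfsp, zfs_iter_cb, &cb);
 *	zfs_foreach_mountpoint(hdl, cb.cb_handles, cb.cb_used,
 *	    zfs_mount_one, &ms, B_TRUE);
 */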
1407
1408 /*
1409 * Mount and share all datasets within the given pool. This assumes that no
1410 * datasets within the pool are currently mounted.
1411 */
1412 #pragma weak zpool_mount_datasets = zpool_enable_datasets
1413 int
1414 zpool_enable_datasets(zpool_handle_t *zhp, const char *mntopts, int flags)
1415 {
1416 get_all_cb_t cb = { 0 };
1417 mount_state_t ms = { 0 };
1418 zfs_handle_t *zfsp;
1419 int ret = 0;
1420
1421 if ((zfsp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
1422 ZFS_TYPE_DATASET)) == NULL)
1423 goto out;
1424
1425 /*
1426 * Gather all non-snapshot datasets within the pool. Start by adding
1427 * the root filesystem for this pool to the list, and then iterate
1428 * over all child filesystems.
1429 */
1430 libzfs_add_handle(&cb, zfsp);
1431 if (zfs_iter_filesystems(zfsp, zfs_iter_cb, &cb) != 0)
1432 goto out;
1433
1434 /*
1435 * Mount all filesystems
1436 */
1437 ms.ms_mntopts = mntopts;
1438 ms.ms_mntflags = flags;
1439 zfs_foreach_mountpoint(zhp->zpool_hdl, cb.cb_handles, cb.cb_used,
1440 zfs_mount_one, &ms, B_TRUE);
1441 if (ms.ms_mntstatus != 0)
1442 ret = ms.ms_mntstatus;
1443
1444 /*
1445 * Share all filesystems that need to be shared. This needs to be
1446 * a separate pass because libshare is not mt-safe, and so we need
1447 * to share serially.
1448 */
1449 ms.ms_mntstatus = 0;
1450 zfs_foreach_mountpoint(zhp->zpool_hdl, cb.cb_handles, cb.cb_used,
1451 zfs_share_one, &ms, B_FALSE);
1452 if (ms.ms_mntstatus != 0)
1453 ret = ms.ms_mntstatus;
1454 else
1455 zfs_commit_all_shares();
1456
1457 out:
1458 for (int i = 0; i < cb.cb_used; i++)
1459 zfs_close(cb.cb_handles[i]);
1460 free(cb.cb_handles);
1461
1462 return (ret);
1463 }
1464
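/*
 * qsort() comparator used by zpool_disable_datasets() below: it sorts
 * mountpoints in reverse lexicographic order so that child mountpoints are
 * unmounted before their parents.
 */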
1465 static int
1466 mountpoint_compare(const void *a, const void *b)
1467 {
1468 const char *mounta = *((char **)a);
1469 const char *mountb = *((char **)b);
1470
1471 return (strcmp(mountb, mounta));
1472 }
1473
1474 /* alias for 2002/240 */
1475 #pragma weak zpool_unmount_datasets = zpool_disable_datasets
1476 /*
1477 * Unshare and unmount all datasets within the given pool. We don't want to
1478 * rely on traversing the DSL to discover the filesystems within the pool,
1479 * because this may be expensive (if not all of them are mounted), and can fail
1480 * arbitrarily (on I/O error, for example). Instead, we walk /proc/self/mounts
1481 * and gather all the filesystems that are currently mounted.
1482 */
1483 int
1484 zpool_disable_datasets(zpool_handle_t *zhp, boolean_t force)
1485 {
1486 int used, alloc;
1487 struct mnttab entry;
1488 size_t namelen;
1489 char **mountpoints = NULL;
1490 zfs_handle_t **datasets = NULL;
1491 libzfs_handle_t *hdl = zhp->zpool_hdl;
1492 int i;
1493 int ret = -1;
1494 int flags = (force ? MS_FORCE : 0);
1495
1496 namelen = strlen(zhp->zpool_name);
1497
1498 /* Reopen MNTTAB to prevent reading stale data from open file */
1499 if (freopen(MNTTAB, "r", hdl->libzfs_mnttab) == NULL)
1500 return (ENOENT);
1501
1502 used = alloc = 0;
1503 while (getmntent(hdl->libzfs_mnttab, &entry) == 0) {
1504 /*
1505 * Ignore non-ZFS entries.
1506 */
1507 if (entry.mnt_fstype == NULL ||
1508 strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
1509 continue;
1510
1511 /*
1512 * Ignore filesystems not within this pool.
1513 */
1514 if (entry.mnt_mountp == NULL ||
1515 strncmp(entry.mnt_special, zhp->zpool_name, namelen) != 0 ||
1516 (entry.mnt_special[namelen] != '/' &&
1517 entry.mnt_special[namelen] != '\0'))
1518 continue;
1519
1520 /*
1521 * At this point we've found a filesystem within our pool. Add
1522 * it to our growing list.
1523 */
1524 if (used == alloc) {
1525 if (alloc == 0) {
1526 if ((mountpoints = zfs_alloc(hdl,
1527 8 * sizeof (void *))) == NULL)
1528 goto out;
1529
1530 if ((datasets = zfs_alloc(hdl,
1531 8 * sizeof (void *))) == NULL)
1532 goto out;
1533
1534 alloc = 8;
1535 } else {
1536 void *ptr;
1537
1538 if ((ptr = zfs_realloc(hdl, mountpoints,
1539 alloc * sizeof (void *),
1540 alloc * 2 * sizeof (void *))) == NULL)
1541 goto out;
1542 mountpoints = ptr;
1543
1544 if ((ptr = zfs_realloc(hdl, datasets,
1545 alloc * sizeof (void *),
1546 alloc * 2 * sizeof (void *))) == NULL)
1547 goto out;
1548 datasets = ptr;
1549
1550 alloc *= 2;
1551 }
1552 }
1553
1554 if ((mountpoints[used] = zfs_strdup(hdl,
1555 entry.mnt_mountp)) == NULL)
1556 goto out;
1557
1558 /*
1559 * This is allowed to fail, in case there is some I/O error. It
1560 * is only used to determine if we need to remove the underlying
1561 * mountpoint, so failure is not fatal.
1562 */
1563 datasets[used] = make_dataset_handle(hdl, entry.mnt_special);
1564
1565 used++;
1566 }
1567
1568 /*
1569 * At this point, we have the entire list of filesystems, so sort it by
1570 * mountpoint.
1571 */
1572 qsort(mountpoints, used, sizeof (char *), mountpoint_compare);
1573
1574 /*
1575 * Walk through and first unshare everything.
1576 */
1577 for (i = 0; i < used; i++) {
1578 zfs_share_proto_t *curr_proto;
1579 for (curr_proto = share_all_proto; *curr_proto != PROTO_END;
1580 curr_proto++) {
1581 if (is_shared(mountpoints[i], *curr_proto) &&
1582 unshare_one(hdl, mountpoints[i],
1583 mountpoints[i], *curr_proto) != 0)
1584 goto out;
1585 }
1586 }
1587 zfs_commit_all_shares();
1588
1589 /*
1590 * Now unmount everything, removing the underlying directories as
1591 * appropriate.
1592 */
1593 for (i = 0; i < used; i++) {
1594 if (unmount_one(hdl, mountpoints[i], flags) != 0)
1595 goto out;
1596 }
1597
1598 for (i = 0; i < used; i++) {
1599 if (datasets[i])
1600 remove_mountpoint(datasets[i]);
1601 }
1602
1603 ret = 0;
1604 out:
1605 for (i = 0; i < used; i++) {
1606 if (datasets[i])
1607 zfs_close(datasets[i]);
1608 free(mountpoints[i]);
1609 }
1610 free(datasets);
1611 free(mountpoints);
1612
1613 return (ret);
1614 }
1615