/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*-
 * Portions Copyright (c) 1992, 1993
 *      The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)null_subr.c 8.7 (Berkeley) 5/14/95
 *
 * $FreeBSD$
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>

#include "bindfs.h"

/*
 * Null layer cache:
 * Each cache entry holds a reference to the lower vnode
 * along with a pointer to the alias vnode. When an
 * entry is added the lower vnode is VREF'd. When the
 * alias is removed the lower vnode is vrele'd.
 */

#define BIND_HASH_SIZE (desiredvnodes / 10)

/* xnu doesn't provide the vnode hashing support that FreeBSD relies on here, so
 * approximate it: hash the lower vnode pointer (scaled down by vnsz2log) mixed
 * with its mount pointer, masked into the table. */
#define BIND_NHASH(vp) (&bind_node_hashtbl[((((uintptr_t)vp) >> vnsz2log) + (uintptr_t)vnode_mount(vp)) & bind_hash_mask])

static LIST_HEAD(bind_node_hashhead, bind_node) * bind_node_hashtbl;
static LCK_GRP_DECLARE(bind_hashlck_grp, "com.apple.filesystems.bindfs");
static LCK_MTX_DECLARE(bind_hashmtx, &bind_hashlck_grp);
static u_long bind_hash_mask;

/* xnu doesn't build a hash into each vnode, so mimic what FreeBSD does here.
 * 9 is an eyeballed estimate of log2 of the vnode structure size. */
static int vnsz2log = 9;

static int bind_hashins(struct mount *, struct bind_node *, struct vnode **);

/*
 * Initialise cache headers
 */
int
bindfs_init(__unused struct vfsconf * vfsp)
{
    BINDFSDEBUG("%s\n", __FUNCTION__);

    bind_node_hashtbl = hashinit(BIND_HASH_SIZE, M_TEMP, &bind_hash_mask);
    if (bind_node_hashtbl == NULL) {
        goto error;
    }

    BINDFSDEBUG("%s finished\n", __FUNCTION__);
    return 0;
error:
    printf("BINDFS: failed to initialize globals\n");
    return KERN_FAILURE;
}

int
bindfs_destroy(void)
{
    /* This is called when the filesystem is unloaded; there is no exact
     * equivalent in vfsops. */
    hashdestroy(bind_node_hashtbl, M_TEMP, bind_hash_mask);
    return 0;
}

/*
 * Find the bindfs vnode mapped to lowervp. Return it in *vpp with an iocount if found.
 * Return 0 on success. On failure *vpp will be NULL and a non-zero error code will be returned.
 */
int
bind_hashget(struct mount * mp, struct vnode * lowervp, struct vnode ** vpp)
{
    struct bind_node_hashhead * hd;
    struct bind_node * a;
    struct vnode * vp = NULL;
    uint32_t vp_vid = 0;
    int error = ENOENT;

    /*
     * Find the hash bucket, then search the (two-way) linked
     * list looking for a bind_node structure which is referencing
     * the lower vnode. We only give up our reference at reclaim, so
     * just check whether the lowervp has been pulled out from under us.
     */
    hd = BIND_NHASH(lowervp);
    lck_mtx_lock(&bind_hashmtx);
    LIST_FOREACH(a, hd, bind_hash)
    {
        if (a->bind_lowervp == lowervp && vnode_mount(BINDTOV(a)) == mp) {
            vp = BINDTOV(a);
            if (a->bind_lowervid != vnode_vid(lowervp)) {
                /* lowervp has been recycled */
                error = EIO;
                vp = NULL;
            } else {
                vp_vid = a->bind_myvid;
            }
            break;
        }
    }
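    /*
     * Take a holdcount on vp while still under the hash lock so it cannot be
     * freed, then drop the lock before trying for an iocount, since
     * vnode_getwithvid() may block.
     */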
    if (vp) {
        vnode_hold(vp);
    }
    lck_mtx_unlock(&bind_hashmtx);

    if (vp != NULL) {
        error = vnode_getwithvid(vp, vp_vid);
        if (error == 0) {
            *vpp = vp;
        }
        vnode_drop(vp);
    }
    return error;
}

/*
 * Act like bind_hashget, but add passed bind_node to hash if no existing
 * node found.
 * If we find a vnode in the hash table it is returned via vpp. If we don't
 * find a hit in the table, then vpp is NULL on return and xp is added to the table.
 * 0 is returned if a hash table hit occurs or if we insert the bind_node.
 * EIO is returned if we found a hash table hit but the lower vnode was recycled.
 */
static int
bind_hashins(struct mount * mp, struct bind_node * xp, struct vnode ** vpp)
{
    struct bind_node_hashhead * hd;
    struct bind_node * oxp;
    struct vnode * ovp = NULL;
    uint32_t oxp_vid = 0;
    int error = 0;

    hd = BIND_NHASH(xp->bind_lowervp);
    lck_mtx_lock(&bind_hashmtx);
    LIST_FOREACH(oxp, hd, bind_hash)
    {
        if (oxp->bind_lowervp == xp->bind_lowervp && vnode_mount(BINDTOV(oxp)) == mp) {
            ovp = BINDTOV(oxp);
            if (oxp->bind_lowervid != vnode_vid(oxp->bind_lowervp)) {
                /* The lower vnode has been recycled, which means the vnode we
                 * are trying to add is stale (we may not actually catch every
                 * recycle right now). This is an exceptional case; return EIO
                 * and don't add it. */
                error = EIO;
                ovp = NULL;
            } else {
                oxp_vid = oxp->bind_myvid;
            }
            goto end;
        }
    }
    /* If it wasn't in the hash map then the vnode pointed to by xp already has
     * an iocount, so don't get another. */
    LIST_INSERT_HEAD(hd, xp, bind_hash);
    xp->bind_flags |= BIND_FLAG_HASHED;
end:
    if (ovp) {
        vnode_hold(ovp);
    }
    lck_mtx_unlock(&bind_hashmtx);
    if (ovp != NULL) {
        /* if we found something in the hash map then grab an iocount */
        error = vnode_getwithvid(ovp, oxp_vid);
        if (error == 0) {
            *vpp = ovp;
        }
        vnode_drop(ovp);
    }
    return error;
}

/*
 * Remove node from hash.
 */
void
bind_hashrem(struct bind_node * xp)
{
    if (xp->bind_flags & BIND_FLAG_HASHED) {
        lck_mtx_lock(&bind_hashmtx);
        LIST_REMOVE(xp, bind_hash);
        lck_mtx_unlock(&bind_hashmtx);
    }
}

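/*
 * Allocate and zero-fill a bind_node. If a lower vnode is supplied, record it
 * along with its current vid so later lookups can detect that it has been
 * recycled.
 */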
static struct bind_node *
bind_nodecreate(struct vnode * lowervp)
{
    struct bind_node * xp;

    xp = kalloc_type(struct bind_node, Z_WAITOK | Z_ZERO | Z_NOFAIL);
    if (lowervp) {
        xp->bind_lowervp = lowervp;
        xp->bind_lowervid = vnode_vid(lowervp);
    }
    return xp;
}

/* assumption is that vnode has iocount on it after vnode create */
int
bind_getnewvnode(
    struct mount * mp, struct vnode * lowervp, struct vnode * dvp, struct vnode ** vpp, struct componentname * cnp, int root)
{
    struct vnode_fsparam vnfs_param;
    int error = 0;
    enum vtype type = VDIR;
    struct bind_node * xp = bind_nodecreate(lowervp);

    if (xp == NULL) {
        return ENOMEM;
    }

    if (lowervp) {
        type = vnode_vtype(lowervp);
    }

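    /* Describe the new vnode to VFS; vnode_create_ext() below allocates and
     * initializes it from these parameters. */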
    vnfs_param.vnfs_mp = mp;
    vnfs_param.vnfs_vtype = type;
    vnfs_param.vnfs_str = "bindfs";
    vnfs_param.vnfs_dvp = dvp;
    vnfs_param.vnfs_fsnode = (void *)xp;
    vnfs_param.vnfs_vops = bindfs_vnodeop_p;
    vnfs_param.vnfs_markroot = root;
    vnfs_param.vnfs_marksystem = 0;
    vnfs_param.vnfs_rdev = 0;
    vnfs_param.vnfs_filesize = 0; // set this to 0 since we should only be shadowing non-regular files
    vnfs_param.vnfs_cnp = cnp;
    vnfs_param.vnfs_flags = VNFS_ADDFSREF;

    error = vnode_create_ext(VNCREATE_FLAVOR, VCREATESIZE, &vnfs_param, vpp, VNODE_CREATE_DEFAULT);
    if (error == 0) {
        xp->bind_vnode = *vpp;
        xp->bind_myvid = vnode_vid(*vpp);
        vnode_settag(*vpp, VT_BINDFS);
    } else {
        kfree_type(struct bind_node, xp);
    }
    return error;
}

/*
 * Make a new or get existing bindfs node.
 * Vp is the alias vnode, lowervp is the lower vnode.
 *
 * lowervp is assumed to have an iocount on it from the caller
 */
int
bind_nodeget(
    struct mount * mp, struct vnode * lowervp, struct vnode * dvp, struct vnode ** vpp, struct componentname * cnp, int root)
{
    struct vnode * vp;
    int error;

    /* Look up the hash first. */
    error = bind_hashget(mp, lowervp, vpp);
    /* ENOENT means it wasn't found, EIO is a failure we should bail out on,
     * and 0 means it was found. */
    if (error != ENOENT) {
        /* bind_hashget checked the vid, so if we got something here it's legit
         * to the best of our knowledge. */
        /* If we found something then there is an iocount on vpp;
         * if we didn't find something then vpp shouldn't be used by the caller. */
        return error;
    }

    /*
     * We do not serialize vnode creation, instead we will check for
     * duplicates later, when adding new vnode to hash.
     */
    error = vnode_ref(lowervp); // take a ref on lowervp so we let the system know we care about it
    if (error) {
        // Failed to get a reference on the lower vp so bail. Lowervp may be gone already.
        return error;
    }

    error = bind_getnewvnode(mp, lowervp, dvp, &vp, cnp, root);

    if (error) {
        vnode_rele(lowervp);
        return error;
    }

    /*
     * Atomically insert our new node into the hash or vget existing
     * if someone else has beaten us to it.
     */
    error = bind_hashins(mp, VTOBIND(vp), vpp);
    if (error || *vpp != NULL) {
        /* recycle will call reclaim which will get rid of the internals */
        vnode_recycle(vp);
        vnode_put(vp);
        /* if we found vpp, then bind_hashins put an iocount on it */
        return error;
    }

    /* vp has an iocount from bind_getnewvnode */
    *vpp = vp;

    return 0;
}
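
/*
 * Usage sketch (illustrative only, not part of this file): a bindfs lookup
 * vnode op that has resolved lowervp in the lower filesystem with an iocount
 * might call
 *
 *     error = bind_nodeget(mp, lowervp, dvp, &vp, cnp, 0);
 *
 * and, on success, hand vp (which carries an iocount) back to VFS. The caller
 * still owns its iocount on lowervp; the bind_node keeps its own usecount
 * reference on the lower vnode until reclaim.
 */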