/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <vm/uma.h>

SDT_PROVIDER_DECLARE(vfs);
SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, "struct vnode *",
    "char *", "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, "int",
    "struct vnode *", "char *");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit__negative,
    "struct vnode *", "char *");
SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE1(vfs, namecache, purge, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, "struct mount *");
SDT_PROBE_DEFINE3(vfs, namecache, zap, done, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, zap_negative, done, "struct vnode *",
    "char *", "int");
SDT_PROBE_DEFINE3(vfs, namecache, shrink_negative, done, "struct vnode *",
    "char *", "int");

/*
 * This structure describes the elements in the cache of recent
 * names looked up by namei.
 */

struct namecache {
	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
	struct vnode *nc_dvp;		/* vnode of parent of name */
	union {
		struct vnode *nu_vp;	/* vnode the name refers to */
		u_int nu_neghits;	/* negative entry hits */
	} n_un;
	u_char nc_flag;			/* flag bits */
	u_char nc_nlen;			/* length of name */
	char nc_name[0];		/* segment name + nul */
};

/*
 * struct namecache_ts repeats struct namecache layout up to the
 * nc_nlen member.
 * struct namecache_ts is used in place of struct namecache when time(s) need
 * to be stored.  The nc_dotdottime field is used when a cache entry is mapping
 * both a non-dotdot directory name plus dotdot for the directory's
 * parent.
 */
struct namecache_ts {
	struct timespec	nc_time;	/* timespec provided by fs */
	struct timespec	nc_dotdottime;	/* dotdot timespec provided by fs */
	int	nc_ticks;		/* ticks value when entry was added */
	struct namecache nc_nc;
};

#define	nc_vp		n_un.nu_vp
#define	nc_neghits	n_un.nu_neghits

/*
 * Flags in namecache.nc_flag
 */
#define	NCF_WHITE	0x01
#define	NCF_ISDOTDOT	0x02
#define	NCF_TS		0x04
#define	NCF_DTS		0x08
#define	NCF_DVDROP	0x10
#define	NCF_NEGATIVE	0x20
#define	NCF_HOTNEGATIVE	0x40

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  The cache is indexed by hash value
 * obtained from (dvp, name) where dvp refers to the directory
 * containing name.
 *
 * If it is a "negative" entry (i.e., for a name that is known NOT to
 * exist), the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 *
 * These locks are used (in the order in which they can be taken):
 * NAME		TYPE	ROLE
 * vnodelock	mtx	vnode lists and v_cache_dd field protection
 * bucketlock	rwlock	for access to given set of hash buckets
 * neglist	mtx	negative entry LRU management
 *
 * Additionally, ncneg_shrink_lock mtx is used to have at most one thread
 * shrinking the LRU list.
 *
 * It is legal to take multiple vnodelock and bucketlock locks.  The locking
 * order is lower address first.  Both are recursive.
 *
 * "." lookups are lockless.
 *
 * ".." and vnode -> name lookups require vnodelock.
 *
 * name -> vnode lookup requires the relevant bucketlock to be held for reading.
 *
 * Insertions and removals of entries require involved vnodes and bucketlocks
 * to be write-locked to prevent other threads from seeing the entry.
 *
 * Some lookups result in removal of the found entry (e.g. getting rid of a
 * negative entry with the intent to create a positive one), which poses a
 * problem when multiple threads reach the state.  Similarly, two different
 * threads can purge two different vnodes and try to remove the same name.
 *
 * If the already held vnode lock is lower than the second required lock, we
 * can just take the other lock.  However, in the opposite case, this could
 * deadlock.  As such, this is resolved by trylocking and, if that fails,
 * unlocking the first node, locking everything in order and revalidating
 * the state.
 */

/*
 * Structures associated with name caching.
 */
#define	NCHHASH(hash) \
	(&nchashtbl[(hash) & nchash])
static __read_mostly LIST_HEAD(nchashhead, namecache) *nchashtbl;/* Hash Table */
static u_long __read_mostly	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
    "Size of namecache hash table");
static u_long __read_mostly	ncnegfactor = 12; /* ratio of negative entries */
SYSCTL_ULONG(_vfs, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
    "Ratio of negative namecache entries");
static u_long __exclusive_cache_line	numneg;	/* number of negative entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0,
    "Number of negative entries in namecache");
static u_long __exclusive_cache_line	numcache;/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0,
    "Number of namecache entries");
static u_long __exclusive_cache_line	numcachehv;/* number of cache entries with vnodes held */
SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0,
    "Number of namecache entries with vnodes held");
u_int __read_mostly	ncsizefactor = 2;
SYSCTL_UINT(_vfs, OID_AUTO, ncsizefactor, CTLFLAG_RW, &ncsizefactor, 0,
    "Size factor for namecache");
static u_int __read_mostly	ncpurgeminvnodes;
SYSCTL_UINT(_vfs, OID_AUTO, ncpurgeminvnodes, CTLFLAG_RW, &ncpurgeminvnodes, 0,
    "Number of vnodes below which purgevfs ignores the request");
static u_int __read_mostly	ncneghitsrequeue = 8;
SYSCTL_UINT(_vfs, OID_AUTO, ncneghitsrequeue, CTLFLAG_RW, &ncneghitsrequeue, 0,
    "Number of hits to requeue a negative entry in the LRU list");

struct nchstats	nchstats;		/* cache effectiveness statistics */

static struct mtx	ncneg_shrink_lock;
static int	shrink_list_turn;

struct neglist {
	struct mtx		nl_lock;
	TAILQ_HEAD(, namecache) nl_list;
} __aligned(CACHE_LINE_SIZE);

static struct neglist __read_mostly	*neglists;
static struct neglist ncneg_hot;

#define	numneglists	(ncneghash + 1)
static u_int __read_mostly	ncneghash;
static inline struct neglist *
NCP2NEGLIST(struct namecache *ncp)
{

	return (&neglists[(((uintptr_t)(ncp) >> 8) & ncneghash)]);
}

#define	numbucketlocks (ncbuckethash + 1)
static u_int __read_mostly	ncbuckethash;
static struct rwlock_padalign __read_mostly	*bucketlocks;
#define	HASH2BUCKETLOCK(hash) \
	((struct rwlock *)(&bucketlocks[((hash) & ncbuckethash)]))

#define	numvnodelocks (ncvnodehash + 1)
static u_int __read_mostly	ncvnodehash;
static struct mtx __read_mostly	*vnodelocks;
static inline struct mtx *
VP2VNODELOCK(struct vnode *vp)
{

	return (&vnodelocks[(((uintptr_t)(vp) >> 8) & ncvnodehash)]);
}

/*
 * UMA zones for the VFS cache.
 *
 * The small cache is used for entries with short names, which are the
 * most common.  The large cache is used for entries which are too big to
 * fit in the small cache.
 */
static uma_zone_t __read_mostly cache_zone_small;
static uma_zone_t __read_mostly cache_zone_small_ts;
static uma_zone_t __read_mostly cache_zone_large;
static uma_zone_t __read_mostly cache_zone_large_ts;

#define	CACHE_PATH_CUTOFF	35

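/*
 * Allocate a cache entry from the zone matching the name length, using
 * the timestamp-capable zones when the caller wants times stored.
 */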
static struct namecache *
cache_alloc(int len, int ts)
{
	struct namecache_ts *ncp_ts;
	struct namecache *ncp;

	if (__predict_false(ts)) {
		if (len <= CACHE_PATH_CUTOFF)
			ncp_ts = uma_zalloc(cache_zone_small_ts, M_WAITOK);
		else
			ncp_ts = uma_zalloc(cache_zone_large_ts, M_WAITOK);
		ncp = &ncp_ts->nc_nc;
	} else {
		if (len <= CACHE_PATH_CUTOFF)
			ncp = uma_zalloc(cache_zone_small, M_WAITOK);
		else
			ncp = uma_zalloc(cache_zone_large, M_WAITOK);
	}
	return (ncp);
}

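/*
 * Return an entry to the zone it was allocated from, dropping the hold
 * on the parent directory vnode if NCF_DVDROP was set when the entry
 * was zapped.
 */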
static void
cache_free(struct namecache *ncp)
{
	struct namecache_ts *ncp_ts;

	if (ncp == NULL)
		return;
	if ((ncp->nc_flag & NCF_DVDROP) != 0)
		vdrop(ncp->nc_dvp);
	if (__predict_false(ncp->nc_flag & NCF_TS)) {
		ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
		if (ncp->nc_nlen <= CACHE_PATH_CUTOFF)
			uma_zfree(cache_zone_small_ts, ncp_ts);
		else
			uma_zfree(cache_zone_large_ts, ncp_ts);
	} else {
		if (ncp->nc_nlen <= CACHE_PATH_CUTOFF)
			uma_zfree(cache_zone_small, ncp);
		else
			uma_zfree(cache_zone_large, ncp);
	}
}

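/*
 * Copy the timestamps out of a namecache_ts entry.  Callers may only
 * pass timestamp pointers for entries created with timestamps, as
 * asserted below.
 */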
static void
cache_out_ts(struct namecache *ncp, struct timespec *tsp, int *ticksp)
{
	struct namecache_ts *ncp_ts;

	KASSERT((ncp->nc_flag & NCF_TS) != 0 ||
	    (tsp == NULL && ticksp == NULL),
	    ("No NCF_TS"));

	if (tsp == NULL && ticksp == NULL)
		return;

	ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
	if (tsp != NULL)
		*tsp = ncp_ts->nc_time;
	if (ticksp != NULL)
		*ticksp = ncp_ts->nc_ticks;
}

static int __read_mostly	doingcache = 1;	/* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0,
    "VFS namecache enabled");

/* Export size information to userland */
SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, SYSCTL_NULL_INT_PTR,
    sizeof(struct namecache), "sizeof(struct namecache)");

/*
 * The new name cache statistics
 */
static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0,
    "Name cache statistics");
#define STATNODE_ULONG(name, descr)					\
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, descr);
#define STATNODE_COUNTER(name, descr)					\
	static counter_u64_t __read_mostly name;			\
	SYSCTL_COUNTER_U64(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, descr);
STATNODE_ULONG(numneg, "Number of negative cache entries");
STATNODE_ULONG(numcache, "Number of cache entries");
STATNODE_COUNTER(numcalls, "Number of cache lookups");
STATNODE_COUNTER(dothits, "Number of '.' hits");
STATNODE_COUNTER(dotdothits, "Number of '..' hits");
STATNODE_COUNTER(numchecks, "Number of checks in lookup");
STATNODE_COUNTER(nummiss, "Number of cache misses");
STATNODE_COUNTER(nummisszap, "Number of cache misses we do not want to cache");
STATNODE_COUNTER(numposzaps,
    "Number of cache hits (positive) we do not want to cache");
STATNODE_COUNTER(numposhits, "Number of cache hits (positive)");
STATNODE_COUNTER(numnegzaps,
    "Number of cache hits (negative) we do not want to cache");
STATNODE_COUNTER(numneghits, "Number of cache hits (negative)");
/* These count for kern___getcwd(), too. */
STATNODE_COUNTER(numfullpathcalls, "Number of fullpath search calls");
STATNODE_COUNTER(numfullpathfail1, "Number of fullpath search errors (ENOTDIR)");
STATNODE_COUNTER(numfullpathfail2,
    "Number of fullpath search errors (VOP_VPTOCNP failures)");
STATNODE_COUNTER(numfullpathfail4, "Number of fullpath search errors (ENOMEM)");
STATNODE_COUNTER(numfullpathfound, "Number of successful fullpath calls");
static long zap_and_exit_bucket_fail; STATNODE_ULONG(zap_and_exit_bucket_fail,
    "Number of times zap_and_exit failed to lock");
static long cache_lock_vnodes_cel_3_failures;
STATNODE_ULONG(cache_lock_vnodes_cel_3_failures,
    "Number of times 3-way vnode locking failed");

static void cache_zap_locked(struct namecache *ncp, bool neg_locked);
static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen);

static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

static int cache_yield;
SYSCTL_INT(_vfs_cache, OID_AUTO, yield, CTLFLAG_RD, &cache_yield, 0,
    "Number of times cache called yield");

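/*
 * Long-running cache operations call this between iterations to
 * voluntarily give up the CPU when preemption is due.
 */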
static void
cache_maybe_yield(void)
{

	if (should_yield()) {
		cache_yield++;
		kern_yield(PRI_USER);
	}
}

static inline void
cache_assert_vlp_locked(struct mtx *vlp)
{

	if (vlp != NULL)
		mtx_assert(vlp, MA_OWNED);
}

static inline void
cache_assert_vnode_locked(struct vnode *vp)
{
	struct mtx *vlp;

	vlp = VP2VNODELOCK(vp);
	cache_assert_vlp_locked(vlp);
}

static uint32_t
cache_get_hash(char *name, u_char len, struct vnode *dvp)
{
	uint32_t hash;

	hash = fnv_32_buf(name, len, FNV1_32_INIT);
	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
	return (hash);
}

static inline struct rwlock *
NCP2BUCKETLOCK(struct namecache *ncp)
{
	uint32_t hash;

	hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen, ncp->nc_dvp);
	return (HASH2BUCKETLOCK(hash));
}

#ifdef INVARIANTS
static void
cache_assert_bucket_locked(struct namecache *ncp, int mode)
{
	struct rwlock *blp;

	blp = NCP2BUCKETLOCK(ncp);
	rw_assert(blp, mode);
}
#else
#define cache_assert_bucket_locked(x, y) do { } while (0)
#endif

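/*
 * Order a lock pair by address.  Locks are always taken lower address
 * first, which keeps multi-lock acquisition deadlock-free.
 */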
#define cache_sort(x, y)	_cache_sort((void **)(x), (void **)(y))
static void
_cache_sort(void **p1, void **p2)
{
	void *tmp;

	if (*p1 > *p2) {
		tmp = *p2;
		*p2 = *p1;
		*p1 = tmp;
	}
}

static void
cache_lock_all_buckets(void)
{
	u_int i;

	for (i = 0; i < numbucketlocks; i++)
		rw_wlock(&bucketlocks[i]);
}

static void
cache_unlock_all_buckets(void)
{
	u_int i;

	for (i = 0; i < numbucketlocks; i++)
		rw_wunlock(&bucketlocks[i]);
}

static void
cache_lock_all_vnodes(void)
{
	u_int i;

	for (i = 0; i < numvnodelocks; i++)
		mtx_lock(&vnodelocks[i]);
}

static void
cache_unlock_all_vnodes(void)
{
	u_int i;

	for (i = 0; i < numvnodelocks; i++)
		mtx_unlock(&vnodelocks[i]);
}

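/*
 * Try to lock a pair of vnode locks in address order without blocking.
 * Returns EAGAIN with nothing held if either trylock fails.
 */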
static int
cache_trylock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
{

	cache_sort(&vlp1, &vlp2);
	MPASS(vlp2 != NULL);

	if (vlp1 != NULL) {
		if (!mtx_trylock(vlp1))
			return (EAGAIN);
	}
	if (!mtx_trylock(vlp2)) {
		if (vlp1 != NULL)
			mtx_unlock(vlp1);
		return (EAGAIN);
	}

	return (0);
}

static void
cache_unlock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
{

	MPASS(vlp1 != NULL || vlp2 != NULL);

	if (vlp1 != NULL)
		mtx_unlock(vlp1);
	if (vlp2 != NULL)
		mtx_unlock(vlp2);
}

static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct nchstats snap;

	if (req->oldptr == NULL)
		return (SYSCTL_OUT(req, 0, sizeof(snap)));

	snap = nchstats;
	snap.ncs_goodhits = counter_u64_fetch(numposhits);
	snap.ncs_neghits = counter_u64_fetch(numneghits);
	snap.ncs_badhits = counter_u64_fetch(numposzaps) +
	    counter_u64_fetch(numnegzaps);
	snap.ncs_miss = counter_u64_fetch(nummisszap) +
	    counter_u64_fetch(nummiss);

	return (SYSCTL_OUT(req, &snap, sizeof(snap)));
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE | CTLFLAG_RD |
    CTLFLAG_MPSAFE, 0, 0, sysctl_nchstats, "LU",
    "VFS cache effectiveness statistics");

#ifdef DIAGNOSTIC
/*
 * Grab an atomic snapshot of the name cache hash chain lengths
 */
static SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL,
    "hash table stats");

static int
sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
{
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int i, error, n_nchash, *cntbuf;

retry:
	n_nchash = nchash + 1;	/* nchash is max index, not count */
	if (req->oldptr == NULL)
		return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));
	cntbuf = malloc(n_nchash * sizeof(int), M_TEMP, M_ZERO | M_WAITOK);
	cache_lock_all_buckets();
	if (n_nchash != nchash + 1) {
		cache_unlock_all_buckets();
		free(cntbuf, M_TEMP);
		goto retry;
	}
	/* Scan hash tables counting entries */
	for (ncpp = nchashtbl, i = 0; i < n_nchash; ncpp++, i++)
		LIST_FOREACH(ncp, ncpp, nc_hash)
			cntbuf[i]++;
	cache_unlock_all_buckets();
	for (error = 0, i = 0; i < n_nchash; i++)
		if ((error = SYSCTL_OUT(req, &cntbuf[i], sizeof(int))) != 0)
			break;
	free(cntbuf, M_TEMP);
	return (error);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD|
    CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int",
    "nchash chain lengths");

static int
sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int n_nchash;
	int count, maxlength, used, pct;

	if (!req->oldptr)
		return SYSCTL_OUT(req, 0, 4 * sizeof(int));

	cache_lock_all_buckets();
	n_nchash = nchash + 1;	/* nchash is max index, not count */
	used = 0;
	maxlength = 0;

	/* Scan hash tables for applicable entries */
	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
		count = 0;
		LIST_FOREACH(ncp, ncpp, nc_hash) {
			count++;
		}
		if (count)
			used++;
		if (maxlength < count)
			maxlength = count;
	}
	n_nchash = nchash + 1;
	cache_unlock_all_buckets();
	pct = (used * 100) / (n_nchash / 100);
	error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &used, sizeof(used));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &pct, sizeof(pct));
	if (error)
		return (error);
	return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD|
    CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I",
    "nchash statistics (number of total/used buckets, maximum chain length, usage percentage)");
#endif

/*
 * Negative entries management
 *
 * A variation of the LRU scheme is used.  New entries are hashed into
 * one of numneglists cold lists.  Entries get promoted to the hot list
 * on first hit.  Partial LRU for the hot list is maintained by
 * requeueing them every ncneghitsrequeue hits.
 *
 * The shrinker will demote the hot list head and evict from the cold
 * lists in a round-robin manner.
 */
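/*
 * Account for a hit on a negative entry, promoting it to the hot list
 * (or requeueing it at the hot list tail if already hot) as described
 * above.
 */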
static void
cache_negative_hit(struct namecache *ncp)
{
	struct neglist *neglist;
	u_int hits;

	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	hits = atomic_fetchadd_int(&ncp->nc_neghits, 1);
	if (ncp->nc_flag & NCF_HOTNEGATIVE) {
		if ((hits % ncneghitsrequeue) != 0)
			return;
		mtx_lock(&ncneg_hot.nl_lock);
		if (ncp->nc_flag & NCF_HOTNEGATIVE) {
			TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst);
			TAILQ_INSERT_TAIL(&ncneg_hot.nl_list, ncp, nc_dst);
			mtx_unlock(&ncneg_hot.nl_lock);
			return;
		}
		/*
		 * The shrinker cleared the flag and removed the entry from
		 * the hot list. Put it back.
		 */
	} else {
		mtx_lock(&ncneg_hot.nl_lock);
	}
	neglist = NCP2NEGLIST(ncp);
	mtx_lock(&neglist->nl_lock);
	if (!(ncp->nc_flag & NCF_HOTNEGATIVE)) {
		TAILQ_REMOVE(&neglist->nl_list, ncp, nc_dst);
		TAILQ_INSERT_TAIL(&ncneg_hot.nl_list, ncp, nc_dst);
		ncp->nc_flag |= NCF_HOTNEGATIVE;
	}
	mtx_unlock(&neglist->nl_lock);
	mtx_unlock(&ncneg_hot.nl_lock);
}

static void
cache_negative_insert(struct namecache *ncp, bool neg_locked)
{
	struct neglist *neglist;

	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	cache_assert_bucket_locked(ncp, RA_WLOCKED);
	neglist = NCP2NEGLIST(ncp);
	if (!neg_locked) {
		mtx_lock(&neglist->nl_lock);
	} else {
		mtx_assert(&neglist->nl_lock, MA_OWNED);
	}
	TAILQ_INSERT_TAIL(&neglist->nl_list, ncp, nc_dst);
	if (!neg_locked)
		mtx_unlock(&neglist->nl_lock);
	atomic_add_rel_long(&numneg, 1);
}

static void
cache_negative_remove(struct namecache *ncp, bool neg_locked)
{
	struct neglist *neglist;
	bool hot_locked = false;
	bool list_locked = false;

	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	cache_assert_bucket_locked(ncp, RA_WLOCKED);
	neglist = NCP2NEGLIST(ncp);
	if (!neg_locked) {
		if (ncp->nc_flag & NCF_HOTNEGATIVE) {
			hot_locked = true;
			mtx_lock(&ncneg_hot.nl_lock);
			if (!(ncp->nc_flag & NCF_HOTNEGATIVE)) {
				list_locked = true;
				mtx_lock(&neglist->nl_lock);
			}
		} else {
			list_locked = true;
			mtx_lock(&neglist->nl_lock);
		}
	}
	if (ncp->nc_flag & NCF_HOTNEGATIVE) {
		mtx_assert(&ncneg_hot.nl_lock, MA_OWNED);
		TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst);
	} else {
		mtx_assert(&neglist->nl_lock, MA_OWNED);
		TAILQ_REMOVE(&neglist->nl_list, ncp, nc_dst);
	}
	if (list_locked)
		mtx_unlock(&neglist->nl_lock);
	if (hot_locked)
		mtx_unlock(&ncneg_hot.nl_lock);
	atomic_subtract_rel_long(&numneg, 1);
}

static void
cache_negative_shrink_select(int start, struct namecache **ncpp,
    struct neglist **neglistpp)
{
	struct neglist *neglist;
	struct namecache *ncp;
	int i;

	*ncpp = ncp = NULL;
	neglist = NULL;

	for (i = start; i < numneglists; i++) {
		neglist = &neglists[i];
		if (TAILQ_FIRST(&neglist->nl_list) == NULL)
			continue;
		mtx_lock(&neglist->nl_lock);
		ncp = TAILQ_FIRST(&neglist->nl_list);
		if (ncp != NULL)
			break;
		mtx_unlock(&neglist->nl_lock);
	}

	*neglistpp = neglist;
	*ncpp = ncp;
}

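/*
 * Evict one negative entry: demote the current hot list head, then pick
 * a victim from the cold lists round-robin.  The victim's vnode and
 * bucket locks are acquired in order after dropping the list locks, so
 * the state is revalidated before zapping.
 */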
static void
cache_negative_zap_one(void)
{
	struct namecache *ncp, *ncp2;
	struct neglist *neglist;
	struct mtx *dvlp;
	struct rwlock *blp;

	if (!mtx_trylock(&ncneg_shrink_lock))
		return;

	mtx_lock(&ncneg_hot.nl_lock);
	ncp = TAILQ_FIRST(&ncneg_hot.nl_list);
	if (ncp != NULL) {
		neglist = NCP2NEGLIST(ncp);
		mtx_lock(&neglist->nl_lock);
		TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst);
		TAILQ_INSERT_TAIL(&neglist->nl_list, ncp, nc_dst);
		ncp->nc_flag &= ~NCF_HOTNEGATIVE;
		mtx_unlock(&neglist->nl_lock);
	}

	cache_negative_shrink_select(shrink_list_turn, &ncp, &neglist);
	shrink_list_turn++;
	if (shrink_list_turn == numneglists)
		shrink_list_turn = 0;
	if (ncp == NULL && shrink_list_turn == 0)
		cache_negative_shrink_select(shrink_list_turn, &ncp, &neglist);
	if (ncp == NULL) {
		mtx_unlock(&ncneg_hot.nl_lock);
		goto out;
	}

	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	dvlp = VP2VNODELOCK(ncp->nc_dvp);
	blp = NCP2BUCKETLOCK(ncp);
	mtx_unlock(&neglist->nl_lock);
	mtx_unlock(&ncneg_hot.nl_lock);
	mtx_lock(dvlp);
	rw_wlock(blp);
	mtx_lock(&neglist->nl_lock);
	ncp2 = TAILQ_FIRST(&neglist->nl_list);
	if (ncp != ncp2 || dvlp != VP2VNODELOCK(ncp2->nc_dvp) ||
	    blp != NCP2BUCKETLOCK(ncp2) || !(ncp2->nc_flag & NCF_NEGATIVE)) {
		ncp = NULL;
		goto out_unlock_all;
	}
	SDT_PROBE3(vfs, namecache, shrink_negative, done, ncp->nc_dvp,
	    ncp->nc_name, ncp->nc_neghits);

	cache_zap_locked(ncp, true);
out_unlock_all:
	mtx_unlock(&neglist->nl_lock);
	rw_wunlock(blp);
	mtx_unlock(dvlp);
out:
	mtx_unlock(&ncneg_shrink_lock);
	cache_free(ncp);
}

/*
 * cache_zap_locked():
 *
 * Removes a namecache entry from the cache, whether it contains an
 * actual vnode pointer or is just a negative cache entry.
 */
static void
cache_zap_locked(struct namecache *ncp, bool neg_locked)
{

	if (!(ncp->nc_flag & NCF_NEGATIVE))
		cache_assert_vnode_locked(ncp->nc_vp);
	cache_assert_vnode_locked(ncp->nc_dvp);
	cache_assert_bucket_locked(ncp, RA_WLOCKED);

	CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp,
	    (ncp->nc_flag & NCF_NEGATIVE) ? NULL : ncp->nc_vp);
	if (!(ncp->nc_flag & NCF_NEGATIVE)) {
		SDT_PROBE3(vfs, namecache, zap, done, ncp->nc_dvp,
		    ncp->nc_name, ncp->nc_vp);
	} else {
		SDT_PROBE3(vfs, namecache, zap_negative, done, ncp->nc_dvp,
		    ncp->nc_name, ncp->nc_neghits);
	}
	LIST_REMOVE(ncp, nc_hash);
	if (!(ncp->nc_flag & NCF_NEGATIVE)) {
		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
		if (ncp == ncp->nc_vp->v_cache_dd)
			ncp->nc_vp->v_cache_dd = NULL;
	} else {
		cache_negative_remove(ncp, neg_locked);
	}
	if (ncp->nc_flag & NCF_ISDOTDOT) {
		if (ncp == ncp->nc_dvp->v_cache_dd)
			ncp->nc_dvp->v_cache_dd = NULL;
	} else {
		LIST_REMOVE(ncp, nc_src);
		if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
			ncp->nc_flag |= NCF_DVDROP;
			atomic_subtract_rel_long(&numcachehv, 1);
		}
	}
	atomic_subtract_rel_long(&numcache, 1);
}

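/*
 * Zap a negative entry whose directory vnode lock is already held;
 * only the bucket lock needs to be acquired.
 */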
static void
cache_zap_negative_locked_vnode_kl(struct namecache *ncp, struct vnode *vp)
{
	struct rwlock *blp;

	MPASS(ncp->nc_dvp == vp);
	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	cache_assert_vnode_locked(vp);

	blp = NCP2BUCKETLOCK(ncp);
	rw_wlock(blp);
	cache_zap_locked(ncp, false);
	rw_wunlock(blp);
}

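/*
 * Zap an entry while already holding the vnode lock for vp.  If lock
 * ordering prevents taking the second vnode lock directly, all locks
 * are dropped and reacquired in order and false is returned so the
 * caller can revalidate; *vlpp then carries the extra held lock.
 */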
static bool
cache_zap_locked_vnode_kl2(struct namecache *ncp, struct vnode *vp,
    struct mtx **vlpp)
{
	struct mtx *pvlp, *vlp1, *vlp2, *to_unlock;
	struct rwlock *blp;

	MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp);
	cache_assert_vnode_locked(vp);

	if (ncp->nc_flag & NCF_NEGATIVE) {
		if (*vlpp != NULL) {
			mtx_unlock(*vlpp);
			*vlpp = NULL;
		}
		cache_zap_negative_locked_vnode_kl(ncp, vp);
		return (true);
	}

	pvlp = VP2VNODELOCK(vp);
	blp = NCP2BUCKETLOCK(ncp);
	vlp1 = VP2VNODELOCK(ncp->nc_dvp);
	vlp2 = VP2VNODELOCK(ncp->nc_vp);

	if (*vlpp == vlp1 || *vlpp == vlp2) {
		to_unlock = *vlpp;
		*vlpp = NULL;
	} else {
		if (*vlpp != NULL) {
			mtx_unlock(*vlpp);
			*vlpp = NULL;
		}
		cache_sort(&vlp1, &vlp2);
		if (vlp1 == pvlp) {
			mtx_lock(vlp2);
			to_unlock = vlp2;
		} else {
			if (!mtx_trylock(vlp1))
				goto out_relock;
			to_unlock = vlp1;
		}
	}
	rw_wlock(blp);
	cache_zap_locked(ncp, false);
	rw_wunlock(blp);
	if (to_unlock != NULL)
		mtx_unlock(to_unlock);
	return (true);

out_relock:
	mtx_unlock(vlp2);
	mtx_lock(vlp1);
	mtx_lock(vlp2);
	MPASS(*vlpp == NULL);
	*vlpp = vlp1;
	return (false);
}

static int
cache_zap_locked_vnode(struct namecache *ncp, struct vnode *vp)
{
	struct mtx *pvlp, *vlp1, *vlp2, *to_unlock;
	struct rwlock *blp;
	int error = 0;

	MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp);
	cache_assert_vnode_locked(vp);

	pvlp = VP2VNODELOCK(vp);
	if (ncp->nc_flag & NCF_NEGATIVE) {
		cache_zap_negative_locked_vnode_kl(ncp, vp);
		goto out;
	}

	blp = NCP2BUCKETLOCK(ncp);
	vlp1 = VP2VNODELOCK(ncp->nc_dvp);
	vlp2 = VP2VNODELOCK(ncp->nc_vp);
	cache_sort(&vlp1, &vlp2);
	if (vlp1 == pvlp) {
		mtx_lock(vlp2);
		to_unlock = vlp2;
	} else {
		if (!mtx_trylock(vlp1)) {
			error = EAGAIN;
			goto out;
		}
		to_unlock = vlp1;
	}
	rw_wlock(blp);
	cache_zap_locked(ncp, false);
	rw_wunlock(blp);
	mtx_unlock(to_unlock);
out:
	mtx_unlock(pvlp);
	return (error);
}

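/*
 * Zap an entry found under a locked bucket.  The relevant vnode locks
 * are only trylocked to preserve ordering; EAGAIN is returned with the
 * bucket lock dropped if that fails.
 */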
static int
cache_zap_wlocked_bucket(struct namecache *ncp, struct rwlock *blp)
{
	struct mtx *dvlp, *vlp;

	cache_assert_bucket_locked(ncp, RA_WLOCKED);

	dvlp = VP2VNODELOCK(ncp->nc_dvp);
	vlp = NULL;
	if (!(ncp->nc_flag & NCF_NEGATIVE))
		vlp = VP2VNODELOCK(ncp->nc_vp);
	if (cache_trylock_vnodes(dvlp, vlp) == 0) {
		cache_zap_locked(ncp, false);
		rw_wunlock(blp);
		cache_unlock_vnodes(dvlp, vlp);
		return (0);
	}

	rw_wunlock(blp);
	return (EAGAIN);
}

static int
cache_zap_rlocked_bucket(struct namecache *ncp, struct rwlock *blp)
{
	struct mtx *dvlp, *vlp;

	cache_assert_bucket_locked(ncp, RA_RLOCKED);

	dvlp = VP2VNODELOCK(ncp->nc_dvp);
	vlp = NULL;
	if (!(ncp->nc_flag & NCF_NEGATIVE))
		vlp = VP2VNODELOCK(ncp->nc_vp);
	if (cache_trylock_vnodes(dvlp, vlp) == 0) {
		rw_runlock(blp);
		rw_wlock(blp);
		cache_zap_locked(ncp, false);
		rw_wunlock(blp);
		cache_unlock_vnodes(dvlp, vlp);
		return (0);
	}

	rw_runlock(blp);
	return (EAGAIN);
}

static int
cache_zap_wlocked_bucket_kl(struct namecache *ncp, struct rwlock *blp,
    struct mtx **vlpp1, struct mtx **vlpp2)
{
	struct mtx *dvlp, *vlp;

	cache_assert_bucket_locked(ncp, RA_WLOCKED);

	dvlp = VP2VNODELOCK(ncp->nc_dvp);
	vlp = NULL;
	if (!(ncp->nc_flag & NCF_NEGATIVE))
		vlp = VP2VNODELOCK(ncp->nc_vp);
	cache_sort(&dvlp, &vlp);

	if (*vlpp1 == dvlp && *vlpp2 == vlp) {
		cache_zap_locked(ncp, false);
		cache_unlock_vnodes(dvlp, vlp);
		*vlpp1 = NULL;
		*vlpp2 = NULL;
		return (0);
	}

	if (*vlpp1 != NULL)
		mtx_unlock(*vlpp1);
	if (*vlpp2 != NULL)
		mtx_unlock(*vlpp2);
	*vlpp1 = NULL;
	*vlpp2 = NULL;

	if (cache_trylock_vnodes(dvlp, vlp) == 0) {
		cache_zap_locked(ncp, false);
		cache_unlock_vnodes(dvlp, vlp);
		return (0);
	}

	rw_wunlock(blp);
	*vlpp1 = dvlp;
	*vlpp2 = vlp;
	if (*vlpp1 != NULL)
		mtx_lock(*vlpp1);
	mtx_lock(*vlpp2);
	rw_wlock(blp);
	return (EAGAIN);
}

static void
cache_lookup_unlock(struct rwlock *blp, struct mtx *vlp)
{

	if (blp != NULL) {
		rw_runlock(blp);
	} else {
		mtx_unlock(vlp);
	}
}

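/*
 * Handle "." lookups: return dvp itself, relocking it if the caller
 * asked for a different lock type.
 */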
static int __noinline
cache_lookup_dot(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
    struct timespec *tsp, int *ticksp)
{
	int ltype;

	*vpp = dvp;
	CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
	    dvp, cnp->cn_nameptr);
	counter_u64_add(dothits, 1);
	SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ".", *vpp);
	if (tsp != NULL)
		timespecclear(tsp);
	if (ticksp != NULL)
		*ticksp = ticks;
	vrefact(*vpp);
	/*
	 * When we lookup "." we still can be asked to lock it
	 * differently...
	 */
	ltype = cnp->cn_lkflags & LK_TYPE_MASK;
	if (ltype != VOP_ISLOCKED(*vpp)) {
		if (ltype == LK_EXCLUSIVE) {
			vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
			if ((*vpp)->v_iflag & VI_DOOMED) {
				/* forced unmount */
				vrele(*vpp);
				*vpp = NULL;
				return (ENOENT);
			}
		} else
			vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY);
	}
	return (-1);
}

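/*
 * Lookup variant for callers which cleared MAKEENTRY: instead of
 * returning a match, any entry found for the name is removed from
 * the cache.
 */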
static __noinline int
cache_lookup_nomakeentry(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp, struct timespec *tsp, int *ticksp)
{
	struct namecache *ncp;
	struct rwlock *blp;
	struct mtx *dvlp, *dvlp2;
	uint32_t hash;
	int error;

	if (cnp->cn_namelen == 2 &&
	    cnp->cn_nameptr[0] == '.' && cnp->cn_nameptr[1] == '.') {
		counter_u64_add(dotdothits, 1);
		dvlp = VP2VNODELOCK(dvp);
		dvlp2 = NULL;
		mtx_lock(dvlp);
retry_dotdot:
		ncp = dvp->v_cache_dd;
		if (ncp == NULL) {
			SDT_PROBE3(vfs, namecache, lookup, miss, dvp,
			    "..", NULL);
			mtx_unlock(dvlp);
			if (dvlp2 != NULL)
				mtx_unlock(dvlp2);
			return (0);
		}
		if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) {
			if (ncp->nc_dvp != dvp)
				panic("dvp %p v_cache_dd %p\n", dvp, ncp);
			if (!cache_zap_locked_vnode_kl2(ncp,
			    dvp, &dvlp2))
				goto retry_dotdot;
			MPASS(dvp->v_cache_dd == NULL);
			mtx_unlock(dvlp);
			if (dvlp2 != NULL)
				mtx_unlock(dvlp2);
			cache_free(ncp);
		} else {
			dvp->v_cache_dd = NULL;
			mtx_unlock(dvlp);
			if (dvlp2 != NULL)
				mtx_unlock(dvlp2);
		}
		return (0);
	}

	hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
	blp = HASH2BUCKETLOCK(hash);
retry:
	if (LIST_EMPTY(NCHHASH(hash)))
		goto out_no_entry;

	rw_wlock(blp);

	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		counter_u64_add(numchecks, 1);
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	/* We failed to find an entry */
	if (ncp == NULL) {
		rw_wunlock(blp);
		goto out_no_entry;
	}

	counter_u64_add(numposzaps, 1);

	error = cache_zap_wlocked_bucket(ncp, blp);
	if (error != 0) {
		zap_and_exit_bucket_fail++;
		cache_maybe_yield();
		goto retry;
	}
	cache_free(ncp);
	return (0);
out_no_entry:
	SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr, NULL);
	counter_u64_add(nummisszap, 1);
	return (0);
}

/**
 * Lookup a name in the name cache
 *
 * # Arguments
 *
 * - dvp:	Parent directory in which to search.
 * - vpp:	Return argument.  Will contain desired vnode on cache hit.
 * - cnp:	Parameters of the name search.  The most interesting bits of
 *		the cn_flags field have the following meanings:
 *	- MAKEENTRY:	If clear, free an entry from the cache rather than look
 *			it up.
 *	- ISDOTDOT:	Must be set if and only if cn_nameptr == ".."
 * - tsp:	Return storage for cache timestamp.  On a successful (positive
 *		or negative) lookup, tsp will be filled with any timespec that
 *		was stored when this cache entry was created.  However, it will
 *		be clear for "." entries.
 * - ticks:	Return storage for alternate cache timestamp.  On a successful
 *		(positive or negative) lookup, it will contain the ticks value
 *		that was current when the cache entry was created, unless cnp
 *		was ".".
 *
 * # Returns
 *
 * - -1:	A positive cache hit.  vpp will contain the desired vnode.
 * - ENOENT:	A negative cache hit, or dvp was recycled out from under us due
 *		to a forced unmount.  vpp will not be modified.  If the entry
 *		is a whiteout, then the ISWHITEOUT flag will be set in
 *		cnp->cn_flags.
 * - 0:		A cache miss.  vpp will not be modified.
 *
 * # Locking
 *
 * On a cache hit, vpp will be returned locked and ref'd.  If we're looking up
 * .., dvp is unlocked.  If we're looking up . an extra ref is taken, but the
 * lock is not recursively acquired.
 */
int
cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
    struct timespec *tsp, int *ticksp)
{
	struct namecache_ts *ncp_ts;
	struct namecache *ncp;
	struct rwlock *blp;
	struct mtx *dvlp;
	uint32_t hash;
	int error, ltype;

	if (__predict_false(!doingcache)) {
		cnp->cn_flags &= ~MAKEENTRY;
		return (0);
	}

	counter_u64_add(numcalls, 1);

	if (__predict_false(cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.'))
		return (cache_lookup_dot(dvp, vpp, cnp, tsp, ticksp));

	if ((cnp->cn_flags & MAKEENTRY) == 0)
		return (cache_lookup_nomakeentry(dvp, vpp, cnp, tsp, ticksp));

retry:
	blp = NULL;
	dvlp = NULL;
	error = 0;
	if (cnp->cn_namelen == 2 &&
	    cnp->cn_nameptr[0] == '.' && cnp->cn_nameptr[1] == '.') {
		counter_u64_add(dotdothits, 1);
		dvlp = VP2VNODELOCK(dvp);
		mtx_lock(dvlp);
		ncp = dvp->v_cache_dd;
		if (ncp == NULL) {
			SDT_PROBE3(vfs, namecache, lookup, miss, dvp,
			    "..", NULL);
			mtx_unlock(dvlp);
			return (0);
		}
		if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) {
			if (ncp->nc_flag & NCF_NEGATIVE)
				*vpp = NULL;
			else
				*vpp = ncp->nc_vp;
		} else
			*vpp = ncp->nc_dvp;
		/* Return failure if negative entry was found. */
		if (*vpp == NULL)
			goto negative_success;
		CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
		    dvp, cnp->cn_nameptr, *vpp);
		SDT_PROBE3(vfs, namecache, lookup, hit, dvp, "..",
		    *vpp);
		cache_out_ts(ncp, tsp, ticksp);
		if ((ncp->nc_flag & (NCF_ISDOTDOT | NCF_DTS)) ==
		    NCF_DTS && tsp != NULL) {
			ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
			*tsp = ncp_ts->nc_dotdottime;
		}
		goto success;
	}

	hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
	blp = HASH2BUCKETLOCK(hash);
	rw_rlock(blp);

	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		counter_u64_add(numchecks, 1);
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	/* We failed to find an entry */
	if (ncp == NULL) {
		rw_runlock(blp);
		SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr,
		    NULL);
		counter_u64_add(nummiss, 1);
		return (0);
	}

	/* We found a "positive" match, return the vnode */
	if (!(ncp->nc_flag & NCF_NEGATIVE)) {
		counter_u64_add(numposhits, 1);
		*vpp = ncp->nc_vp;
		CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
		    dvp, cnp->cn_nameptr, *vpp, ncp);
		SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name,
		    *vpp);
		cache_out_ts(ncp, tsp, ticksp);
		goto success;
	}

negative_success:
	/* We found a negative match, and want to create it, so purge */
	if (cnp->cn_nameiop == CREATE) {
		counter_u64_add(numnegzaps, 1);
		goto zap_and_exit;
	}

	counter_u64_add(numneghits, 1);
	cache_negative_hit(ncp);
	if (ncp->nc_flag & NCF_WHITE)
		cnp->cn_flags |= ISWHITEOUT;
	SDT_PROBE2(vfs, namecache, lookup, hit__negative, dvp,
	    ncp->nc_name);
	cache_out_ts(ncp, tsp, ticksp);
	cache_lookup_unlock(blp, dvlp);
	return (ENOENT);

success:
	/*
	 * On success we return a locked and ref'd vnode as per the lookup
	 * protocol.
	 */
	MPASS(dvp != *vpp);
	ltype = 0;	/* silence gcc warning */
	if (cnp->cn_flags & ISDOTDOT) {
		ltype = VOP_ISLOCKED(dvp);
		VOP_UNLOCK(dvp, 0);
	}
	vhold(*vpp);
	cache_lookup_unlock(blp, dvlp);
	error = vget(*vpp, cnp->cn_lkflags | LK_VNHELD, cnp->cn_thread);
	if (cnp->cn_flags & ISDOTDOT) {
		vn_lock(dvp, ltype | LK_RETRY);
		if (dvp->v_iflag & VI_DOOMED) {
			if (error == 0)
				vput(*vpp);
			*vpp = NULL;
			return (ENOENT);
		}
	}
	if (error) {
		*vpp = NULL;
		goto retry;
	}
	if ((cnp->cn_flags & ISLASTCN) &&
	    (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
		ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
	}
	return (-1);

zap_and_exit:
	if (blp != NULL)
		error = cache_zap_rlocked_bucket(ncp, blp);
	else
		error = cache_zap_locked_vnode(ncp, dvp);
	if (error != 0) {
		zap_and_exit_bucket_fail++;
		cache_maybe_yield();
		goto retry;
	}
	cache_free(ncp);
	return (0);
}

struct celockstate {
	struct mtx *vlp[3];
	struct rwlock *blp[2];
};
CTASSERT((nitems(((struct celockstate *)0)->vlp) == 3));
CTASSERT((nitems(((struct celockstate *)0)->blp) == 2));

static inline void
cache_celockstate_init(struct celockstate *cel)
{

	bzero(cel, sizeof(*cel));
}

static void
cache_lock_vnodes_cel(struct celockstate *cel, struct vnode *vp,
    struct vnode *dvp)
{
	struct mtx *vlp1, *vlp2;

	MPASS(cel->vlp[0] == NULL);
	MPASS(cel->vlp[1] == NULL);
	MPASS(cel->vlp[2] == NULL);

	MPASS(vp != NULL || dvp != NULL);

	vlp1 = VP2VNODELOCK(vp);
	vlp2 = VP2VNODELOCK(dvp);
	cache_sort(&vlp1, &vlp2);

	if (vlp1 != NULL) {
		mtx_lock(vlp1);
		cel->vlp[0] = vlp1;
	}
	mtx_lock(vlp2);
	cel->vlp[1] = vlp2;
}

static void
cache_unlock_vnodes_cel(struct celockstate *cel)
{

	MPASS(cel->vlp[0] != NULL || cel->vlp[1] != NULL);

	if (cel->vlp[0] != NULL)
		mtx_unlock(cel->vlp[0]);
	if (cel->vlp[1] != NULL)
		mtx_unlock(cel->vlp[1]);
	if (cel->vlp[2] != NULL)
		mtx_unlock(cel->vlp[2]);
}

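/*
 * Lock a third vnode lock on top of the two already held in cel.  If
 * the new lock sorts below a held one, everything is dropped and
 * reacquired in order; false is returned so the caller can revalidate.
 */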
static bool
cache_lock_vnodes_cel_3(struct celockstate *cel, struct vnode *vp)
{
	struct mtx *vlp;
	bool ret;

	cache_assert_vlp_locked(cel->vlp[0]);
	cache_assert_vlp_locked(cel->vlp[1]);
	MPASS(cel->vlp[2] == NULL);

	MPASS(vp != NULL);
	vlp = VP2VNODELOCK(vp);

	ret = true;
	if (vlp >= cel->vlp[1]) {
		mtx_lock(vlp);
	} else {
		if (mtx_trylock(vlp))
			goto out;
		cache_lock_vnodes_cel_3_failures++;
		cache_unlock_vnodes_cel(cel);
		if (vlp < cel->vlp[0]) {
			mtx_lock(vlp);
			mtx_lock(cel->vlp[0]);
			mtx_lock(cel->vlp[1]);
		} else {
			if (cel->vlp[0] != NULL)
				mtx_lock(cel->vlp[0]);
			mtx_lock(vlp);
			mtx_lock(cel->vlp[1]);
		}
		ret = false;
	}
out:
	cel->vlp[2] = vlp;
	return (ret);
}

static void
cache_lock_buckets_cel(struct celockstate *cel, struct rwlock *blp1,
    struct rwlock *blp2)
{

	MPASS(cel->blp[0] == NULL);
	MPASS(cel->blp[1] == NULL);

	cache_sort(&blp1, &blp2);

	if (blp1 != NULL) {
		rw_wlock(blp1);
		cel->blp[0] = blp1;
	}
	rw_wlock(blp2);
	cel->blp[1] = blp2;
}

static void
cache_unlock_buckets_cel(struct celockstate *cel)
{

	if (cel->blp[0] != NULL)
		rw_wunlock(cel->blp[0]);
	rw_wunlock(cel->blp[1]);
}

/*
 * Lock part of the cache affected by the insertion.
 *
 * This means vnodelocks for dvp, vp and the relevant bucketlock.
 * However, insertion can result in removal of an old entry.  In this
 * case we have an additional vnode and bucketlock pair to lock.  If the
 * displaced entry is negative, there is no vnode to lock; only the
 * extra bucketlock is taken.
 *
 * That is, in the worst case we have to lock 3 vnodes and 2 bucketlocks,
 * while preserving the locking order (smaller address first).
 */
static void
cache_enter_lock(struct celockstate *cel, struct vnode *dvp, struct vnode *vp,
    uint32_t hash)
{
	struct namecache *ncp;
	struct rwlock *blps[2];

	blps[0] = HASH2BUCKETLOCK(hash);
	for (;;) {
		blps[1] = NULL;
		cache_lock_vnodes_cel(cel, dvp, vp);
		if (vp == NULL || vp->v_type != VDIR)
			break;
		ncp = vp->v_cache_dd;
		if (ncp == NULL)
			break;
		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
			break;
		MPASS(ncp->nc_dvp == vp);
		blps[1] = NCP2BUCKETLOCK(ncp);
		if (ncp->nc_flag & NCF_NEGATIVE)
			break;
		if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp))
			break;
		/*
		 * All vnodes got re-locked. Re-validate the state and if
		 * nothing changed we are done. Otherwise restart.
		 */
		if (ncp == vp->v_cache_dd &&
		    (ncp->nc_flag & NCF_ISDOTDOT) != 0 &&
		    blps[1] == NCP2BUCKETLOCK(ncp) &&
		    VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2])
			break;
		cache_unlock_vnodes_cel(cel);
		cel->vlp[0] = NULL;
		cel->vlp[1] = NULL;
		cel->vlp[2] = NULL;
	}
	cache_lock_buckets_cel(cel, blps[0], blps[1]);
}

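/*
 * Same as cache_enter_lock, but for entering a ".." entry, where it is
 * dvp's (not vp's) v_cache_dd that may need to be displaced.
 */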
1579 static void
cache_enter_lock_dd(struct celockstate * cel,struct vnode * dvp,struct vnode * vp,uint32_t hash)1580 cache_enter_lock_dd(struct celockstate *cel, struct vnode *dvp, struct vnode *vp,
1581 uint32_t hash)
1582 {
1583 struct namecache *ncp;
1584 struct rwlock *blps[2];
1585
1586 blps[0] = HASH2BUCKETLOCK(hash);
1587 for (;;) {
1588 blps[1] = NULL;
1589 cache_lock_vnodes_cel(cel, dvp, vp);
1590 ncp = dvp->v_cache_dd;
1591 if (ncp == NULL)
1592 break;
1593 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
1594 break;
1595 MPASS(ncp->nc_dvp == dvp);
1596 blps[1] = NCP2BUCKETLOCK(ncp);
1597 if (ncp->nc_flag & NCF_NEGATIVE)
1598 break;
1599 if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp))
1600 break;
1601 if (ncp == dvp->v_cache_dd &&
1602 (ncp->nc_flag & NCF_ISDOTDOT) != 0 &&
1603 blps[1] == NCP2BUCKETLOCK(ncp) &&
1604 VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2])
1605 break;
1606 cache_unlock_vnodes_cel(cel);
1607 cel->vlp[0] = NULL;
1608 cel->vlp[1] = NULL;
1609 cel->vlp[2] = NULL;
1610 }
1611 cache_lock_buckets_cel(cel, blps[0], blps[1]);
1612 }
1613
1614 static void
cache_enter_unlock(struct celockstate * cel)1615 cache_enter_unlock(struct celockstate *cel)
1616 {
1617
1618 cache_unlock_buckets_cel(cel);
1619 cache_unlock_vnodes_cel(cel);
1620 }
1621
1622 /*
1623 * Add an entry to the cache.
1624 */
1625 void
cache_enter_time(struct vnode * dvp,struct vnode * vp,struct componentname * cnp,struct timespec * tsp,struct timespec * dtsp)1626 cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
1627 struct timespec *tsp, struct timespec *dtsp)
1628 {
1629 struct celockstate cel;
1630 struct namecache *ncp, *n2, *ndd;
1631 struct namecache_ts *ncp_ts, *n2_ts;
1632 struct nchashhead *ncpp;
1633 struct neglist *neglist;
1634 uint32_t hash;
1635 int flag;
1636 int len;
1637 bool neg_locked;
1638 int lnumcache;
1639
1640 CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
1641 VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp,
1642 ("cache_enter: Adding a doomed vnode"));
1643 VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp,
1644 ("cache_enter: Doomed vnode used as src"));
1645
1646 if (__predict_false(!doingcache))
1647 return;
1648
1649 /*
1650 * Avoid blowout in namecache entries.
1651 */
1652 if (__predict_false(numcache >= desiredvnodes * ncsizefactor))
1653 return;
1654
1655 cache_celockstate_init(&cel);
1656 ndd = NULL;
1657 ncp_ts = NULL;
1658 flag = 0;
1659 if (cnp->cn_nameptr[0] == '.') {
1660 if (cnp->cn_namelen == 1)
1661 return;
1662 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
1663 len = cnp->cn_namelen;
1664 hash = cache_get_hash(cnp->cn_nameptr, len, dvp);
1665 cache_enter_lock_dd(&cel, dvp, vp, hash);
1666 /*
1667 * If dotdot entry already exists, just retarget it
1668 * to new parent vnode, otherwise continue with new
1669 * namecache entry allocation.
1670 */
1671 if ((ncp = dvp->v_cache_dd) != NULL &&
1672 ncp->nc_flag & NCF_ISDOTDOT) {
1673 KASSERT(ncp->nc_dvp == dvp,
1674 ("wrong isdotdot parent"));
1675 neg_locked = false;
1676 if (ncp->nc_flag & NCF_NEGATIVE || vp == NULL) {
1677 neglist = NCP2NEGLIST(ncp);
1678 mtx_lock(&ncneg_hot.nl_lock);
1679 mtx_lock(&neglist->nl_lock);
1680 neg_locked = true;
1681 }
1682 if (!(ncp->nc_flag & NCF_NEGATIVE)) {
1683 TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst,
1684 ncp, nc_dst);
1685 } else {
1686 cache_negative_remove(ncp, true);
1687 }
1688 if (vp != NULL) {
1689 TAILQ_INSERT_HEAD(&vp->v_cache_dst,
1690 ncp, nc_dst);
1691 ncp->nc_flag &= ~(NCF_NEGATIVE|NCF_HOTNEGATIVE);
1692 } else {
1693 ncp->nc_flag &= ~(NCF_HOTNEGATIVE);
1694 ncp->nc_flag |= NCF_NEGATIVE;
1695 cache_negative_insert(ncp, true);
1696 }
1697 if (neg_locked) {
1698 mtx_unlock(&neglist->nl_lock);
1699 mtx_unlock(&ncneg_hot.nl_lock);
1700 }
1701 ncp->nc_vp = vp;
1702 cache_enter_unlock(&cel);
1703 return;
1704 }
1705 dvp->v_cache_dd = NULL;
1706 cache_enter_unlock(&cel);
1707 cache_celockstate_init(&cel);
1708 SDT_PROBE3(vfs, namecache, enter, done, dvp, "..", vp);
1709 flag = NCF_ISDOTDOT;
1710 }
1711 }
1712
1713 /*
1714 * Calculate the hash key and setup as much of the new
1715 * namecache entry as possible before acquiring the lock.
1716 */
1717 ncp = cache_alloc(cnp->cn_namelen, tsp != NULL);
1718 ncp->nc_flag = flag;
1719 ncp->nc_vp = vp;
1720 if (vp == NULL)
1721 ncp->nc_flag |= NCF_NEGATIVE;
1722 ncp->nc_dvp = dvp;
1723 if (tsp != NULL) {
1724 ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
1725 ncp_ts->nc_time = *tsp;
1726 ncp_ts->nc_ticks = ticks;
1727 ncp_ts->nc_nc.nc_flag |= NCF_TS;
1728 if (dtsp != NULL) {
1729 ncp_ts->nc_dotdottime = *dtsp;
1730 ncp_ts->nc_nc.nc_flag |= NCF_DTS;
1731 }
1732 }
1733 len = ncp->nc_nlen = cnp->cn_namelen;
1734 hash = cache_get_hash(cnp->cn_nameptr, len, dvp);
1735 strlcpy(ncp->nc_name, cnp->cn_nameptr, len + 1);
1736 cache_enter_lock(&cel, dvp, vp, hash);
1737
1738 /*
1739 * See if this vnode or negative entry is already in the cache
1740 * with this name. This can happen with concurrent lookups of
1741 * the same path name.
1742 */
1743 ncpp = NCHHASH(hash);
1744 LIST_FOREACH(n2, ncpp, nc_hash) {
1745 if (n2->nc_dvp == dvp &&
1746 n2->nc_nlen == cnp->cn_namelen &&
1747 !bcmp(n2->nc_name, cnp->cn_nameptr, n2->nc_nlen)) {
1748 if (tsp != NULL) {
1749 KASSERT((n2->nc_flag & NCF_TS) != 0,
1750 ("no NCF_TS"));
1751 n2_ts = __containerof(n2, struct namecache_ts, nc_nc);
1752 n2_ts->nc_time = ncp_ts->nc_time;
1753 n2_ts->nc_ticks = ncp_ts->nc_ticks;
1754 if (dtsp != NULL) {
1755 n2_ts->nc_dotdottime = ncp_ts->nc_dotdottime;
1756 if (ncp->nc_flag & NCF_NEGATIVE)
1757 mtx_lock(&ncneg_hot.nl_lock);
1758 n2_ts->nc_nc.nc_flag |= NCF_DTS;
1759 if (ncp->nc_flag & NCF_NEGATIVE)
1760 mtx_unlock(&ncneg_hot.nl_lock);
1761 }
1762 }
1763 goto out_unlock_free;
1764 }
1765 }
1766
1767 if (flag == NCF_ISDOTDOT) {
1768 /*
1769 * See if we are trying to add .. entry, but some other lookup
1770 * has populated v_cache_dd pointer already.
1771 */
1772 if (dvp->v_cache_dd != NULL)
1773 goto out_unlock_free;
1774 KASSERT(vp == NULL || vp->v_type == VDIR,
1775 ("wrong vnode type %p", vp));
1776 dvp->v_cache_dd = ncp;
1777 }
1778
1779 if (vp != NULL) {
1780 if (vp->v_type == VDIR) {
1781 if (flag != NCF_ISDOTDOT) {
1782 /*
1783 * For this case, the cache entry maps both the
1784 * directory name in it and the name ".." for the
1785 * directory's parent.
1786 */
1787 if ((ndd = vp->v_cache_dd) != NULL) {
1788 if ((ndd->nc_flag & NCF_ISDOTDOT) != 0)
1789 cache_zap_locked(ndd, false);
1790 else
1791 ndd = NULL;
1792 }
1793 vp->v_cache_dd = ncp;
1794 }
1795 } else {
1796 vp->v_cache_dd = NULL;
1797 }
1798 }
1799
1800 if (flag != NCF_ISDOTDOT) {
1801 if (LIST_EMPTY(&dvp->v_cache_src)) {
1802 vhold(dvp);
1803 atomic_add_rel_long(&numcachehv, 1);
1804 }
1805 LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
1806 }
1807
1808 /*
1809 * Insert the new namecache entry into the appropriate chain
1810 * within the cache entries table.
1811 */
1812 LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
1813
1814 /*
1815 * If the entry is "negative", we place it into the
1816 * "negative" cache queue, otherwise, we place it into the
1817 * destination vnode's cache entries queue.
1818 */
1819 if (vp != NULL) {
1820 TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
1821 SDT_PROBE3(vfs, namecache, enter, done, dvp, ncp->nc_name,
1822 vp);
1823 } else {
1824 if (cnp->cn_flags & ISWHITEOUT)
1825 ncp->nc_flag |= NCF_WHITE;
1826 cache_negative_insert(ncp, false);
1827 SDT_PROBE2(vfs, namecache, enter_negative, done, dvp,
1828 ncp->nc_name);
1829 }
1830 cache_enter_unlock(&cel);
	lnumcache = atomic_fetchadd_long(&numcache, 1) + 1;
	if (numneg * ncnegfactor > lnumcache)
		cache_negative_zap_one();
	cache_free(ndd);
	return;
out_unlock_free:
	cache_enter_unlock(&cel);
	cache_free(ncp);
	return;
}

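/*
 * Return the smallest power of two strictly greater than val; e.g. both
 * cache_roundup_2(8) and cache_roundup_2(9) yield 16.  Callers subtract 1
 * from the result to obtain a power-of-two mask.
 */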
static u_int
cache_roundup_2(u_int val)
{
	u_int res;

	for (res = 1; res <= val; res <<= 1)
		continue;

	return (res);
}

/*
 * Name cache initialization, called from vfs_init() when we are booting.
 */
static void
nchinit(void *dummy __unused)
{
	u_int i;

	cache_zone_small = uma_zcreate("S VFS Cache",
	    sizeof(struct namecache) + CACHE_PATH_CUTOFF + 1,
	    NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct namecache),
	    UMA_ZONE_ZINIT);
	cache_zone_small_ts = uma_zcreate("STS VFS Cache",
	    sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1,
	    NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct namecache_ts),
	    UMA_ZONE_ZINIT);
	cache_zone_large = uma_zcreate("L VFS Cache",
	    sizeof(struct namecache) + NAME_MAX + 1,
	    NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct namecache),
	    UMA_ZONE_ZINIT);
	cache_zone_large_ts = uma_zcreate("LTS VFS Cache",
	    sizeof(struct namecache_ts) + NAME_MAX + 1,
	    NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct namecache_ts),
	    UMA_ZONE_ZINIT);

	nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
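	/*
	 * Size the bucket-lock mask from the CPU count; there is no point
	 * in having more lock stripes than hash chains, so clamp it to
	 * nchash.
	 */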
	ncbuckethash = cache_roundup_2(mp_ncpus * 64) - 1;
	if (ncbuckethash > nchash)
		ncbuckethash = nchash;
	bucketlocks = malloc(sizeof(*bucketlocks) * numbucketlocks, M_VFSCACHE,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < numbucketlocks; i++)
		rw_init_flags(&bucketlocks[i], "ncbuc", RW_DUPOK | RW_RECURSE);
	ncvnodehash = cache_roundup_2(mp_ncpus * 64) - 1;
	vnodelocks = malloc(sizeof(*vnodelocks) * numvnodelocks, M_VFSCACHE,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < numvnodelocks; i++)
		mtx_init(&vnodelocks[i], "ncvn", NULL, MTX_DUPOK | MTX_RECURSE);
	ncpurgeminvnodes = numbucketlocks;

	ncneghash = 3;
	neglists = malloc(sizeof(*neglists) * numneglists, M_VFSCACHE,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < numneglists; i++) {
		mtx_init(&neglists[i].nl_lock, "ncnegl", NULL, MTX_DEF);
		TAILQ_INIT(&neglists[i].nl_list);
	}
	mtx_init(&ncneg_hot.nl_lock, "ncneglh", NULL, MTX_DEF);
	TAILQ_INIT(&ncneg_hot.nl_list);

	mtx_init(&ncneg_shrink_lock, "ncnegs", NULL, MTX_DEF);

	numcalls = counter_u64_alloc(M_WAITOK);
	dothits = counter_u64_alloc(M_WAITOK);
	dotdothits = counter_u64_alloc(M_WAITOK);
	numchecks = counter_u64_alloc(M_WAITOK);
	nummiss = counter_u64_alloc(M_WAITOK);
	nummisszap = counter_u64_alloc(M_WAITOK);
	numposzaps = counter_u64_alloc(M_WAITOK);
	numposhits = counter_u64_alloc(M_WAITOK);
	numnegzaps = counter_u64_alloc(M_WAITOK);
	numneghits = counter_u64_alloc(M_WAITOK);
	numfullpathcalls = counter_u64_alloc(M_WAITOK);
	numfullpathfail1 = counter_u64_alloc(M_WAITOK);
	numfullpathfail2 = counter_u64_alloc(M_WAITOK);
	numfullpathfail4 = counter_u64_alloc(M_WAITOK);
	numfullpathfound = counter_u64_alloc(M_WAITOK);
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL);

void
cache_changesize(int newmaxvnodes)
{
	struct nchashhead *new_nchashtbl, *old_nchashtbl;
	u_long new_nchash, old_nchash;
	struct namecache *ncp;
	uint32_t hash;
	int i;

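	/*
	 * Size the new table at twice the vnode target, rounded up to a
	 * power of two, and never smaller than the number of bucket locks.
	 */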
	newmaxvnodes = cache_roundup_2(newmaxvnodes * 2);
	if (newmaxvnodes < numbucketlocks)
		newmaxvnodes = numbucketlocks;

	new_nchashtbl = hashinit(newmaxvnodes, M_VFSCACHE, &new_nchash);
	/* If same hash table size, nothing to do */
	if (nchash == new_nchash) {
		free(new_nchashtbl, M_VFSCACHE);
		return;
	}
	/*
	 * Move everything from the old hash table to the new table.
	 * While all the locks are held, none of the namecache entries
	 * can be removed concurrently, since removing an entry requires
	 * unlinking it from the hash table.
	 */
	cache_lock_all_vnodes();
	cache_lock_all_buckets();
	old_nchashtbl = nchashtbl;
	old_nchash = nchash;
	nchashtbl = new_nchashtbl;
	nchash = new_nchash;
	for (i = 0; i <= old_nchash; i++) {
		while ((ncp = LIST_FIRST(&old_nchashtbl[i])) != NULL) {
			hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen,
			    ncp->nc_dvp);
			LIST_REMOVE(ncp, nc_hash);
			LIST_INSERT_HEAD(NCHHASH(hash), ncp, nc_hash);
		}
	}
	cache_unlock_all_buckets();
	cache_unlock_all_vnodes();
	free(old_nchashtbl, M_VFSCACHE);
}

/*
 * Invalidate all entries from and to a particular vnode.
 */
void
cache_purge(struct vnode *vp)
{
	TAILQ_HEAD(, namecache) ncps;
	struct namecache *ncp, *nnp;
	struct mtx *vlp, *vlp2;

	CTR1(KTR_VFS, "cache_purge(%p)", vp);
	SDT_PROBE1(vfs, namecache, purge, done, vp);
	if (LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) &&
	    vp->v_cache_dd == NULL)
		return;
	TAILQ_INIT(&ncps);
	vlp = VP2VNODELOCK(vp);
	vlp2 = NULL;
	mtx_lock(vlp);
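	/*
	 * Zap each entry and collect it on the local ncps list so it can
	 * be freed after the locks are dropped.  cache_zap_locked_vnode_kl2()
	 * returns false if it had to drop locks to make progress, in which
	 * case the lists may have changed and are rescanned.
	 */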
retry:
	while (!LIST_EMPTY(&vp->v_cache_src)) {
		ncp = LIST_FIRST(&vp->v_cache_src);
		if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
			goto retry;
		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
	}
	while (!TAILQ_EMPTY(&vp->v_cache_dst)) {
		ncp = TAILQ_FIRST(&vp->v_cache_dst);
		if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
			goto retry;
		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
	}
	ncp = vp->v_cache_dd;
	if (ncp != NULL) {
		KASSERT(ncp->nc_flag & NCF_ISDOTDOT,
		    ("lost dotdot link"));
		if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
			goto retry;
		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
	}
	KASSERT(vp->v_cache_dd == NULL, ("incomplete purge"));
	mtx_unlock(vlp);
	if (vlp2 != NULL)
		mtx_unlock(vlp2);
	TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
		cache_free(ncp);
	}
}

/*
 * Invalidate all negative entries for a particular directory vnode.
 */
void
cache_purge_negative(struct vnode *vp)
{
	TAILQ_HEAD(, namecache) ncps;
	struct namecache *ncp, *nnp;
	struct mtx *vlp;

	CTR1(KTR_VFS, "cache_purge_negative(%p)", vp);
	SDT_PROBE1(vfs, namecache, purge_negative, done, vp);
	if (LIST_EMPTY(&vp->v_cache_src))
		return;
	TAILQ_INIT(&ncps);
	vlp = VP2VNODELOCK(vp);
	mtx_lock(vlp);
	LIST_FOREACH_SAFE(ncp, &vp->v_cache_src, nc_src, nnp) {
		if (!(ncp->nc_flag & NCF_NEGATIVE))
			continue;
		cache_zap_negative_locked_vnode_kl(ncp, vp);
		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
	}
	mtx_unlock(vlp);
	TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
		cache_free(ncp);
	}
}

/*
 * Flush all entries referencing a particular filesystem.
 */
void
cache_purgevfs(struct mount *mp, bool force)
{
	TAILQ_HEAD(, namecache) ncps;
	struct mtx *vlp1, *vlp2;
	struct rwlock *blp;
	struct nchashhead *bucket;
	struct namecache *ncp, *nnp;
	u_long i, j, n_nchash;
	int error;

	/* Scan hash tables for applicable entries */
	SDT_PROBE1(vfs, namecache, purgevfs, done, mp);
	if (!force && mp->mnt_nvnodelistsize <= ncpurgeminvnodes)
		return;
	TAILQ_INIT(&ncps);
	n_nchash = nchash + 1;
	vlp1 = vlp2 = NULL;
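	/*
	 * Every numbucketlocks'th hash chain shares the same bucket lock,
	 * so with lock i write-held it is safe to scan chains i,
	 * i + numbucketlocks, i + 2 * numbucketlocks, and so on.
	 */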
	for (i = 0; i < numbucketlocks; i++) {
		blp = (struct rwlock *)&bucketlocks[i];
		rw_wlock(blp);
		for (j = i; j < n_nchash; j += numbucketlocks) {
retry:
			bucket = &nchashtbl[j];
			LIST_FOREACH_SAFE(ncp, bucket, nc_hash, nnp) {
				cache_assert_bucket_locked(ncp, RA_WLOCKED);
				if (ncp->nc_dvp->v_mount != mp)
					continue;
				error = cache_zap_wlocked_bucket_kl(ncp, blp,
				    &vlp1, &vlp2);
				if (error != 0)
					goto retry;
				TAILQ_INSERT_HEAD(&ncps, ncp, nc_dst);
			}
		}
		rw_wunlock(blp);
		if (vlp1 == NULL && vlp2 == NULL)
			cache_maybe_yield();
	}
	if (vlp1 != NULL)
		mtx_unlock(vlp1);
	if (vlp2 != NULL)
		mtx_unlock(vlp2);

	TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
		cache_free(ncp);
	}
}

/*
 * Perform canonical checks and a cache lookup, and pass the operation on to
 * the filesystem through VOP_CACHEDLOOKUP only if needed.
 */

int
vfs_cache_lookup(struct vop_lookup_args *ap)
{
	struct vnode *dvp;
	int error;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct ucred *cred = cnp->cn_cred;
	int flags = cnp->cn_flags;
	struct thread *td = cnp->cn_thread;

	*vpp = NULL;
	dvp = ap->a_dvp;

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	error = VOP_ACCESS(dvp, VEXEC, cred, td);
	if (error)
		return (error);

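	/*
	 * cache_lookup() returns 0 on a miss, in which case the lookup is
	 * passed on to the filesystem; -1 on a positive hit, with *vpp set;
	 * or an error (e.g. ENOENT for a cached negative entry).
	 */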
	error = cache_lookup(dvp, vpp, cnp, NULL, NULL);
	if (error == 0)
		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
	if (error == -1)
		return (0);
	return (error);
}

/*
 * XXX All of these sysctls would probably be more productive dead.
 */
static int __read_mostly disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
    "Disable the getcwd syscall");

/* Implementation of the getcwd syscall. */
int
sys___getcwd(struct thread *td, struct __getcwd_args *uap)
{

	return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen,
	    MAXPATHLEN));
}

int
kern___getcwd(struct thread *td, char *buf, enum uio_seg bufseg, size_t buflen,
    size_t path_max)
{
	char *bp, *tmpbuf;
	struct filedesc *fdp;
	struct vnode *cdir, *rdir;
	int error;

	if (__predict_false(disablecwd))
		return (ENODEV);
	if (__predict_false(buflen < 2))
		return (EINVAL);
	if (buflen > path_max)
		buflen = path_max;

	tmpbuf = malloc(buflen, M_TEMP, M_WAITOK);
	fdp = td->td_proc->p_fd;
	FILEDESC_SLOCK(fdp);
	cdir = fdp->fd_cdir;
	vrefact(cdir);
	rdir = fdp->fd_rdir;
	vrefact(rdir);
	FILEDESC_SUNLOCK(fdp);
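	/*
	 * vn_fullpath1() assembles the path from the end of tmpbuf towards
	 * its beginning; on success, bp points at the first character of
	 * the NUL-terminated result inside tmpbuf.
	 */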
	error = vn_fullpath1(td, cdir, rdir, tmpbuf, &bp, buflen);
	vrele(rdir);
	vrele(cdir);

	if (!error) {
		if (bufseg == UIO_SYSSPACE)
			bcopy(bp, buf, strlen(bp) + 1);
		else
			error = copyout(bp, buf, strlen(bp) + 1);
#ifdef KTRACE
		if (KTRPOINT(curthread, KTR_NAMEI))
			ktrnamei(bp);
#endif
	}
	free(tmpbuf, M_TEMP);
	return (error);
}

/*
 * Thus begins the fullpath magic.
 */

static int __read_mostly disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
    "Disable the vn_fullpath function");

/*
 * Retrieve the full filesystem path that corresponds to a vnode from the name
 * cache (if available).
 */
int
vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
{
	char *buf;
	struct filedesc *fdp;
	struct vnode *rdir;
	int error;

	if (__predict_false(disablefullpath))
		return (ENODEV);
	if (__predict_false(vn == NULL))
		return (EINVAL);

	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	fdp = td->td_proc->p_fd;
	FILEDESC_SLOCK(fdp);
	rdir = fdp->fd_rdir;
	vrefact(rdir);
	FILEDESC_SUNLOCK(fdp);
	error = vn_fullpath1(td, vn, rdir, buf, retbuf, MAXPATHLEN);
	vrele(rdir);

	if (!error)
		*freebuf = buf;
	else
		free(buf, M_TEMP);
	return (error);
}

/*
 * This function is similar to vn_fullpath, but it attempts to look up the
 * pathname relative to the global root mount point. This is required for the
 * auditing sub-system, as audited pathnames must be absolute, relative to the
 * global root mount point.
 */
int
vn_fullpath_global(struct thread *td, struct vnode *vn,
    char **retbuf, char **freebuf)
{
	char *buf;
	int error;

	if (__predict_false(disablefullpath))
		return (ENODEV);
	if (__predict_false(vn == NULL))
		return (EINVAL);
	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN);
	if (!error)
		*freebuf = buf;
	else
		free(buf, M_TEMP);
	return (error);
}

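/*
 * Resolve one pathname component: replace *vp with a referenced vnode for
 * its parent directory and copy the component's name to the tail of buf,
 * updating *buflen to the offset of the first character written.  The
 * reference on the original *vp is dropped.
 */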
int
vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen)
{
	struct vnode *dvp;
	struct namecache *ncp;
	struct mtx *vlp;
	int error;

	vlp = VP2VNODELOCK(*vp);
	mtx_lock(vlp);
	TAILQ_FOREACH(ncp, &((*vp)->v_cache_dst), nc_dst) {
		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
			break;
	}
	if (ncp != NULL) {
		if (*buflen < ncp->nc_nlen) {
			mtx_unlock(vlp);
			vrele(*vp);
			counter_u64_add(numfullpathfail4, 1);
			error = ENOMEM;
			SDT_PROBE3(vfs, namecache, fullpath, return, error,
			    vp, NULL);
			return (error);
		}
		*buflen -= ncp->nc_nlen;
		memcpy(buf + *buflen, ncp->nc_name, ncp->nc_nlen);
		SDT_PROBE3(vfs, namecache, fullpath, hit, ncp->nc_dvp,
		    ncp->nc_name, vp);
		dvp = *vp;
		*vp = ncp->nc_dvp;
		vref(*vp);
		mtx_unlock(vlp);
		vrele(dvp);
		return (0);
	}
	SDT_PROBE1(vfs, namecache, fullpath, miss, vp);

	mtx_unlock(vlp);
	vn_lock(*vp, LK_SHARED | LK_RETRY);
	error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen);
	vput(*vp);
	if (error) {
		counter_u64_add(numfullpathfail2, 1);
		SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL);
		return (error);
	}

	*vp = dvp;
	if (dvp->v_iflag & VI_DOOMED) {
		/* forced unmount */
		vrele(dvp);
		error = ENOENT;
		SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL);
		return (error);
	}
	/*
	 * *vp still has its use count incremented.
	 */

	return (0);
}

/*
 * The magic behind kern___getcwd() and vn_fullpath().
 */
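/*
 * The path is assembled backwards: each vn_vptocnp() call deposits one
 * component at the current tail of buf, preceded by a '/', until the
 * process root (or the global root) is reached.  For a vnode reachable
 * as /usr/bin the buffer tail grows as "bin", "/bin", "usr/bin" and
 * finally "/usr/bin".
 */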
static int
vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen)
{
	int error, slash_prefixed;
#ifdef KDTRACE_HOOKS
	struct vnode *startvp = vp;
#endif
	struct vnode *vp1;

	buflen--;
	buf[buflen] = '\0';
	error = 0;
	slash_prefixed = 0;

	SDT_PROBE1(vfs, namecache, fullpath, entry, vp);
	counter_u64_add(numfullpathcalls, 1);
	vref(vp);
	if (vp->v_type != VDIR) {
		error = vn_vptocnp(&vp, td->td_ucred, buf, &buflen);
		if (error)
			return (error);
		if (buflen == 0) {
			vrele(vp);
			return (ENOMEM);
		}
		buf[--buflen] = '/';
		slash_prefixed = 1;
	}
	while (vp != rdir && vp != rootvnode) {
		/*
		 * The vp vnode must already be fully constructed,
		 * since it is either found in the namecache or obtained
		 * from VOP_VPTOCNP(). We may test for VV_ROOT safely
		 * without obtaining the vnode lock.
		 */
		if ((vp->v_vflag & VV_ROOT) != 0) {
			vn_lock(vp, LK_RETRY | LK_SHARED);

			/*
			 * With the vnode locked, check for races with
			 * unmount, forced or not. Note that we
			 * already verified that vp is not equal to
			 * the root vnode, which means that
			 * mnt_vnodecovered can be NULL only for the
			 * case of unmount.
			 */
			if ((vp->v_iflag & VI_DOOMED) != 0 ||
			    (vp1 = vp->v_mount->mnt_vnodecovered) == NULL ||
			    vp1->v_mountedhere != vp->v_mount) {
				vput(vp);
				error = ENOENT;
				SDT_PROBE3(vfs, namecache, fullpath, return,
				    error, vp, NULL);
				break;
			}

			vref(vp1);
			vput(vp);
			vp = vp1;
			continue;
		}
		if (vp->v_type != VDIR) {
			vrele(vp);
			counter_u64_add(numfullpathfail1, 1);
			error = ENOTDIR;
			SDT_PROBE3(vfs, namecache, fullpath, return,
			    error, vp, NULL);
			break;
		}
		error = vn_vptocnp(&vp, td->td_ucred, buf, &buflen);
		if (error)
			break;
		if (buflen == 0) {
			vrele(vp);
			error = ENOMEM;
			SDT_PROBE3(vfs, namecache, fullpath, return, error,
			    startvp, NULL);
			break;
		}
		buf[--buflen] = '/';
		slash_prefixed = 1;
	}
	if (error)
		return (error);
	if (!slash_prefixed) {
		if (buflen == 0) {
			vrele(vp);
			counter_u64_add(numfullpathfail4, 1);
			SDT_PROBE3(vfs, namecache, fullpath, return, ENOMEM,
			    startvp, NULL);
			return (ENOMEM);
		}
		buf[--buflen] = '/';
	}
	counter_u64_add(numfullpathfound, 1);
	vrele(vp);

	SDT_PROBE3(vfs, namecache, fullpath, return, 0, startvp, buf + buflen);
	*retbuf = buf + buflen;
	return (0);
}

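/*
 * Look up the parent directory of the directory vp by scanning the
 * namecache entries that name vp (skipping ".." entries) and return it
 * referenced and shared-locked, or NULL if no entry is cached or the
 * lock cannot be acquired without sleeping.
 */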
struct vnode *
vn_dir_dd_ino(struct vnode *vp)
{
	struct namecache *ncp;
	struct vnode *ddvp;
	struct mtx *vlp;

	ASSERT_VOP_LOCKED(vp, "vn_dir_dd_ino");
	vlp = VP2VNODELOCK(vp);
	mtx_lock(vlp);
	TAILQ_FOREACH(ncp, &(vp->v_cache_dst), nc_dst) {
		if ((ncp->nc_flag & NCF_ISDOTDOT) != 0)
			continue;
		ddvp = ncp->nc_dvp;
		vhold(ddvp);
		mtx_unlock(vlp);
		if (vget(ddvp, LK_SHARED | LK_NOWAIT | LK_VNHELD, curthread))
			return (NULL);
		return (ddvp);
	}
	mtx_unlock(vlp);
	return (NULL);
}

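/*
 * Copy the last component name by which vp is known into buf, truncated
 * to at most buflen - 1 characters and NUL-terminated.
 */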
int
vn_commname(struct vnode *vp, char *buf, u_int buflen)
{
	struct namecache *ncp;
	struct mtx *vlp;
	int l;

	vlp = VP2VNODELOCK(vp);
	mtx_lock(vlp);
	TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst)
		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
			break;
	if (ncp == NULL) {
		mtx_unlock(vlp);
		return (ENOENT);
	}
	l = min(ncp->nc_nlen, buflen - 1);
	memcpy(buf, ncp->nc_name, l);
	mtx_unlock(vlp);
	buf[l] = '\0';
	return (0);
}

/* ABI compat shims for old kernel modules. */
#undef cache_enter

void cache_enter(struct vnode *dvp, struct vnode *vp,
    struct componentname *cnp);

void
cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{

	cache_enter_time(dvp, vp, cnp, NULL, NULL);
}

/*
 * This function updates the path string to the vnode's full global path
 * and checks the size of the new path string against the pathlen argument.
 *
 * Requires a locked, referenced vnode.
 * Vnode is re-locked on success or ENODEV, otherwise unlocked.
 *
 * If the sysctl debug.disablefullpath is set, ENODEV is returned and the
 * vnode is left locked with the path untouched.
 *
 * If vp is a directory, the call to vn_fullpath_global() always succeeds
 * because it falls back to the ".." lookup if the namecache lookup fails.
 */
int
vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path,
    u_int pathlen)
{
	struct nameidata nd;
	struct vnode *vp1;
	char *rpath, *fbuf;
	int error;

	ASSERT_VOP_ELOCKED(vp, __func__);

	/* Return ENODEV if sysctl debug.disablefullpath==1 */
	if (__predict_false(disablefullpath))
		return (ENODEV);

	/* Construct global filesystem path from vp. */
	VOP_UNLOCK(vp, 0);
	error = vn_fullpath_global(td, vp, &rpath, &fbuf);

	if (error != 0) {
		vrele(vp);
		return (error);
	}

	if (strlen(rpath) >= pathlen) {
		vrele(vp);
		error = ENAMETOOLONG;
		goto out;
	}

	/*
	 * Re-lookup the vnode by path to detect a possible rename.
	 * As a side effect, the vnode is relocked.
	 * If the vnode was renamed, return ENOENT.
	 */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
	    UIO_SYSSPACE, path, td);
	error = namei(&nd);
	if (error != 0) {
		vrele(vp);
		goto out;
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp1 = nd.ni_vp;
	vrele(vp);
	if (vp1 == vp)
		strcpy(path, rpath);
	else {
		vput(vp1);
		error = ENOENT;
	}

out:
	free(fbuf, M_TEMP);
	return (error);
}

#ifdef DDB
static void
db_print_vpath(struct vnode *vp)
{

	while (vp != NULL) {
		db_printf("%p: ", vp);
		if (vp == rootvnode) {
			db_printf("/");
			vp = NULL;
		} else {
			if (vp->v_vflag & VV_ROOT) {
				db_printf("<mount point>");
				vp = vp->v_mount->mnt_vnodecovered;
			} else {
				struct namecache *ncp;
				char *ncn;
				int i;

				ncp = TAILQ_FIRST(&vp->v_cache_dst);
				if (ncp != NULL) {
					ncn = ncp->nc_name;
					for (i = 0; i < ncp->nc_nlen; i++)
						db_printf("%c", *ncn++);
					vp = ncp->nc_dvp;
				} else {
					vp = NULL;
				}
			}
		}
		db_printf("\n");
	}

	return;
}

DB_SHOW_COMMAND(vpath, db_show_vpath)
{
	struct vnode *vp;

	if (!have_addr) {
		db_printf("usage: show vpath <struct vnode *>\n");
		return;
	}

	vp = (struct vnode *)addr;
	db_print_vpath(vp);
}

#endif
