// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/locks.c
 *
 * We implement four types of file locks: BSD locks, posix locks, open
 * file description locks, and leases.  For details about BSD locks,
 * see the flock(2) man page; for details about the other three, see
 * fcntl(2).
 *
 *
 * Locking conflicts and dependencies:
 * If multiple threads attempt to lock the same byte (or flock the same file)
 * only one can be granted the lock, and the others must wait their turn.
 * The first lock has been "applied" or "granted", the others are "waiting"
 * and are "blocked" by the "applied" lock.
 *
 * Waiting and applied locks are all kept in trees whose properties are:
 *
 *	- the root of a tree may be an applied or waiting lock.
 *	- every other node in the tree is a waiting lock that
 *	  conflicts with every ancestor of that node.
 *
 * Every such tree begins life as a waiting singleton which obviously
 * satisfies the above properties.
 *
 * The only ways we modify trees preserve these properties:
 *
 *	1. We may add a new leaf node, but only after first verifying that it
 *	   conflicts with all of its ancestors.
 *	2. We may remove the root of a tree, creating a new singleton
 *	   tree from the root and N new trees rooted in the immediate
 *	   children.
 *	3. If the root of a tree is not currently an applied lock, we may
 *	   apply it (if possible).
 *	4. We may upgrade the root of the tree (either extend its range,
 *	   or upgrade its entire range from read to write).
 *
 * When an applied lock is modified in a way that reduces or downgrades any
 * part of its range, we remove all its children (2 above).  This particularly
 * happens when a lock is unlocked.
 *
 * For each of those child trees we "wake up" the thread which is
 * waiting for the lock so it can continue handling as follows: if the
 * root of the tree applies, we do so (3).  If it doesn't, it must
 * conflict with some applied lock.  We remove (wake up) all of its children
 * (2), and add it as a new leaf to the tree rooted in the applied
 * lock (1).  We then repeat the process recursively with those
 * children.
 *
 */
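
/*
 * Illustrative userspace sketches of the four lock types above, each shown
 * in isolation (not part of the kernel build; the fds and the flock struct
 * are assumptions for the example):
 *
 *	flock(fd, LOCK_EX);			// BSD lock, see flock(2)
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *	fcntl(fd, F_SETLK, &fl);		// posix (process-owned) lock
 *	fcntl(fd, F_OFD_SETLK, &fl);		// open file description lock
 *
 *	fcntl(rdonly_fd, F_SETLEASE, F_RDLCK);	// lease; read leases need an
 *						// O_RDONLY descriptor
 */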
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/filelock.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/hashtable.h>
#include <linux/percpu.h>
#include <linux/sysctl.h>

#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>

#include <linux/uaccess.h>

static struct file_lock *file_lock(struct file_lock_core *flc)
{
	return container_of(flc, struct file_lock, c);
}

static bool lease_breaking(struct file_lock *fl)
{
	return fl->c.flc_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}

static int target_leasetype(struct file_lock *fl)
{
	if (fl->c.flc_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->c.flc_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->c.flc_type;
}

static int leases_enable = 1;
static int lease_break_time = 45;

#ifdef CONFIG_SYSCTL
static struct ctl_table locks_sysctls[] = {
	{
		.procname	= "leases-enable",
		.data		= &leases_enable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#ifdef CONFIG_MMU
	{
		.procname	= "lease-break-time",
		.data		= &lease_break_time,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif /* CONFIG_MMU */
};

static int __init init_fs_locks_sysctls(void)
{
	register_sysctl_init("fs", locks_sysctls);
	return 0;
}
early_initcall(init_fs_locks_sysctls);
#endif /* CONFIG_SYSCTL */

/*
 * The global file_lock_list is only used for displaying /proc/locks, so we
 * keep a list on each CPU, with each list protected by its own spinlock.
 * Global serialization is done using file_rwsem.
 *
 * Note that alterations to the list also require that the relevant flc_lock is
 * held.
 */
struct file_lock_list_struct {
	spinlock_t		lock;
	struct hlist_head	hlist;
};
static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);


/*
 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 * It is protected by blocked_lock_lock.
 *
 * We hash locks by lockowner in order to optimize searching for the lock a
 * particular lockowner is waiting on.
 *
 * FIXME: make this value scale via some heuristic? We generally will want more
 * buckets when we have more lockowners holding locks, but that's a little
 * difficult to determine without knowing what the workload will look like.
 */
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);

/*
 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 * want to be holding this lock.
 *
 * In addition, it also protects the fl->fl_blocked_requests list, and the
 * fl->fl_blocker pointer for file_lock structures that are acting as lock
 * requests (in contrast to those that are acting as records of acquired locks).
 *
 * Note that when we acquire this lock in order to change the above fields,
 * we often hold the flc_lock as well. In certain cases, when reading the fields
 * protected by this lock, we can skip acquiring it iff we already hold the
 * flc_lock.
 */
static DEFINE_SPINLOCK(blocked_lock_lock);

static struct kmem_cache *flctx_cache __ro_after_init;
static struct kmem_cache *filelock_cache __ro_after_init;

static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
	struct file_lock_context *ctx;

	/* paired with cmpxchg() below */
	ctx = locks_inode_context(inode);
	if (likely(ctx) || type == F_UNLCK)
		goto out;

	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
	if (!ctx)
		goto out;

	spin_lock_init(&ctx->flc_lock);
	INIT_LIST_HEAD(&ctx->flc_flock);
	INIT_LIST_HEAD(&ctx->flc_posix);
	INIT_LIST_HEAD(&ctx->flc_lease);

	/*
	 * Assign the pointer if it's not already assigned. If it is, then
	 * free the context we just allocated.
	 */
	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
		kmem_cache_free(flctx_cache, ctx);
		ctx = locks_inode_context(inode);
	}
out:
	trace_locks_get_lock_context(inode, type, ctx);
	return ctx;
}

static void
locks_dump_ctx_list(struct list_head *list, char *list_type)
{
	struct file_lock_core *flc;

	list_for_each_entry(flc, list, flc_list)
		pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
			list_type, flc->flc_owner, flc->flc_flags,
			flc->flc_type, flc->flc_pid);
}

static void
locks_check_ctx_lists(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(!list_empty(&ctx->flc_flock) ||
		     !list_empty(&ctx->flc_posix) ||
		     !list_empty(&ctx->flc_lease))) {
		pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
			inode->i_ino);
		locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
		locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
		locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
	}
}

static void
locks_check_ctx_file_list(struct file *filp, struct list_head *list, char *list_type)
{
	struct file_lock_core *flc;
	struct inode *inode = file_inode(filp);

	list_for_each_entry(flc, list, flc_list)
		if (flc->flc_file == filp)
			pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
				" fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
				list_type, MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino,
				flc->flc_owner, flc->flc_flags,
				flc->flc_type, flc->flc_pid);
}

void
locks_free_lock_context(struct inode *inode)
{
	struct file_lock_context *ctx = locks_inode_context(inode);

	if (unlikely(ctx)) {
		locks_check_ctx_lists(inode);
		kmem_cache_free(flctx_cache, ctx);
	}
}

static void locks_init_lock_heads(struct file_lock_core *flc)
{
	INIT_HLIST_NODE(&flc->flc_link);
	INIT_LIST_HEAD(&flc->flc_list);
	INIT_LIST_HEAD(&flc->flc_blocked_requests);
	INIT_LIST_HEAD(&flc->flc_blocked_member);
	init_waitqueue_head(&flc->flc_wait);
}

/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);

	if (fl)
		locks_init_lock_heads(&fl->c);

	return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);

void locks_release_private(struct file_lock *fl)
{
	struct file_lock_core *flc = &fl->c;

	BUG_ON(waitqueue_active(&flc->flc_wait));
	BUG_ON(!list_empty(&flc->flc_list));
	BUG_ON(!list_empty(&flc->flc_blocked_requests));
	BUG_ON(!list_empty(&flc->flc_blocked_member));
	BUG_ON(!hlist_unhashed(&flc->flc_link));

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_put_owner) {
			fl->fl_lmops->lm_put_owner(flc->flc_owner);
			flc->flc_owner = NULL;
		}
		fl->fl_lmops = NULL;
	}
}
EXPORT_SYMBOL_GPL(locks_release_private);

/**
 * locks_owner_has_blockers - Check for blocking lock requests
 * @flctx: file lock context
 * @owner: lock owner
 *
 * Return values:
 *   %true: @owner has at least one blocker
 *   %false: @owner has no blockers
 */
bool locks_owner_has_blockers(struct file_lock_context *flctx, fl_owner_t owner)
{
	struct file_lock_core *flc;

	spin_lock(&flctx->flc_lock);
	list_for_each_entry(flc, &flctx->flc_posix, flc_list) {
		if (flc->flc_owner != owner)
			continue;
		if (!list_empty(&flc->flc_blocked_requests)) {
			spin_unlock(&flctx->flc_lock);
			return true;
		}
	}
	spin_unlock(&flctx->flc_lock);
	return false;
}
EXPORT_SYMBOL_GPL(locks_owner_has_blockers);

/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);

static void
locks_dispose_list(struct list_head *dispose)
{
	struct file_lock *fl;

	while (!list_empty(dispose)) {
		fl = list_first_entry(dispose, struct file_lock, c.flc_list);
		list_del_init(&fl->c.flc_list);
		locks_free_lock(fl);
	}
}

void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(&fl->c);
}
EXPORT_SYMBOL(locks_init_lock);

/*
 * Initialize a new lock from an existing file_lock structure.
 */
void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
	new->c.flc_owner = fl->c.flc_owner;
	new->c.flc_pid = fl->c.flc_pid;
	new->c.flc_file = NULL;
	new->c.flc_flags = fl->c.flc_flags;
	new->c.flc_type = fl->c.flc_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_lmops = fl->fl_lmops;
	new->fl_ops = NULL;

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_get_owner)
			fl->fl_lmops->lm_get_owner(fl->c.flc_owner);
	}
}
EXPORT_SYMBOL(locks_copy_conflock);

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	/* "new" must be a freshly-initialized lock */
	WARN_ON_ONCE(new->fl_ops);

	locks_copy_conflock(new, fl);

	new->c.flc_file = fl->c.flc_file;
	new->fl_ops = fl->fl_ops;

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
	}
}
EXPORT_SYMBOL(locks_copy_lock);

static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
{
	struct file_lock *f;

	/*
	 * As ctx->flc_lock is held, new requests cannot be added to
	 * ->flc_blocked_requests, so we don't need a lock to check if it
	 * is empty.
	 */
	if (list_empty(&fl->c.flc_blocked_requests))
		return;
	spin_lock(&blocked_lock_lock);
	list_splice_init(&fl->c.flc_blocked_requests,
			 &new->c.flc_blocked_requests);
	list_for_each_entry(f, &new->c.flc_blocked_requests,
			    c.flc_blocked_member)
		f->c.flc_blocker = &new->c;
	spin_unlock(&blocked_lock_lock);
}

static inline int flock_translate_cmd(int cmd) {
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}
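
/*
 * For example (illustrative): by the time a userspace call such as
 * flock(fd, LOCK_SH | LOCK_NB) reaches this helper via the flock()
 * syscall, the LOCK_NB bit has already been masked off, so the switch
 * above sees plain LOCK_SH and yields F_RDLCK; LOCK_UN likewise maps
 * to F_UNLCK.
 */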

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static void flock_make_lock(struct file *filp, struct file_lock *fl, int type)
{
	locks_init_lock(fl);

	fl->c.flc_file = filp;
	fl->c.flc_owner = filp;
	fl->c.flc_pid = current->tgid;
	fl->c.flc_flags = FL_FLOCK;
	fl->c.flc_type = type;
	fl->fl_end = OFFSET_MAX;
}

static int assign_type(struct file_lock *fl, int type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->c.flc_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	switch (l->l_whence) {
	case SEEK_SET:
		fl->fl_start = 0;
		break;
	case SEEK_CUR:
		fl->fl_start = filp->f_pos;
		break;
	case SEEK_END:
		fl->fl_start = i_size_read(file_inode(filp));
		break;
	default:
		return -EINVAL;
	}
	if (l->l_start > OFFSET_MAX - fl->fl_start)
		return -EOVERFLOW;
	fl->fl_start += l->l_start;
	if (fl->fl_start < 0)
		return -EINVAL;

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	if (l->l_len > 0) {
		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
			return -EOVERFLOW;
		fl->fl_end = fl->fl_start + (l->l_len - 1);

	} else if (l->l_len < 0) {
		if (fl->fl_start + l->l_len < 0)
			return -EINVAL;
		fl->fl_end = fl->fl_start - 1;
		fl->fl_start += l->l_len;
	} else
		fl->fl_end = OFFSET_MAX;

	fl->c.flc_owner = current->files;
	fl->c.flc_pid = current->tgid;
	fl->c.flc_file = filp;
	fl->c.flc_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
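
/*
 * Worked example for the conversion above (values assumed): with
 * l_whence == SEEK_SET and l_start == 100, l_len == 50 yields the range
 * [fl_start, fl_end] = [100, 149]; l_len == -50 locks the 50 bytes
 * *before* offset 100, giving [50, 99]; and l_len == 0 extends the
 * lock to OFFSET_MAX ("until end of file").
 */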

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	struct flock64 ll = {
		.l_type = l->l_type,
		.l_whence = l->l_whence,
		.l_start = l->l_start,
		.l_len = l->l_len,
	};

	return flock64_to_posix_lock(filp, fl, &ll);
}

/* default lease lock manager operations */
static bool
lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
	return false;
}

static void
lease_setup(struct file_lock *fl, void **priv)
{
	struct file *filp = fl->c.flc_file;
	struct fasync_struct *fa = *priv;

	/*
	 * fasync_insert_entry() returns the old entry if any. If there was no
	 * old entry, then it used "priv" and inserted it into the fasync list.
	 * Clear the pointer to indicate that it shouldn't be freed.
	 */
	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
		*priv = NULL;

	__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
}

static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
	.lm_setup = lease_setup,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, int type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->c.flc_owner = filp;
	fl->c.flc_pid = current->tgid;

	fl->c.flc_file = filp;
	fl->c.flc_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, int type)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;

	if (fl == NULL)
		return ERR_PTR(error);

	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		return ERR_PTR(error);
	}
	return fl;
}

/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}
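
/*
 * E.g. the ranges [0, 99] and [99, 199] overlap, since each range's end
 * is >= the other's start; [0, 99] and [100, 199] are merely adjacent
 * and do not overlap.
 */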

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock_core *fl1, struct file_lock_core *fl2)
{
	return fl1->flc_owner == fl2->flc_owner;
}

/* Must be called with the flc_lock held! */
static void locks_insert_global_locks(struct file_lock_core *flc)
{
	struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);

	percpu_rwsem_assert_held(&file_rwsem);

	spin_lock(&fll->lock);
	flc->flc_link_cpu = smp_processor_id();
	hlist_add_head(&flc->flc_link, &fll->hlist);
	spin_unlock(&fll->lock);
}

/* Must be called with the flc_lock held! */
static void locks_delete_global_locks(struct file_lock_core *flc)
{
	struct file_lock_list_struct *fll;

	percpu_rwsem_assert_held(&file_rwsem);

	/*
	 * Avoid taking lock if already unhashed. This is safe since this check
	 * is done while holding the flc_lock, and new insertions into the list
	 * also require that it be held.
	 */
	if (hlist_unhashed(&flc->flc_link))
		return;

	fll = per_cpu_ptr(&file_lock_list, flc->flc_link_cpu);
	spin_lock(&fll->lock);
	hlist_del_init(&flc->flc_link);
	spin_unlock(&fll->lock);
}

static unsigned long
posix_owner_key(struct file_lock_core *flc)
{
	return (unsigned long) flc->flc_owner;
}

static void locks_insert_global_blocked(struct file_lock_core *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_add(blocked_hash, &waiter->flc_link, posix_owner_key(waiter));
}

static void locks_delete_global_blocked(struct file_lock_core *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_del(&waiter->flc_link);
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 *
 * Must be called with blocked_lock_lock held.
 */
static void __locks_delete_block(struct file_lock_core *waiter)
{
	locks_delete_global_blocked(waiter);
	list_del_init(&waiter->flc_blocked_member);
}

static void __locks_wake_up_blocks(struct file_lock_core *blocker)
{
	while (!list_empty(&blocker->flc_blocked_requests)) {
		struct file_lock_core *waiter;
		struct file_lock *fl;

		waiter = list_first_entry(&blocker->flc_blocked_requests,
					  struct file_lock_core, flc_blocked_member);

		fl = file_lock(waiter);
		__locks_delete_block(waiter);
		if ((waiter->flc_flags & (FL_POSIX | FL_FLOCK)) &&
		    fl->fl_lmops && fl->fl_lmops->lm_notify)
			fl->fl_lmops->lm_notify(fl);
		else
			locks_wake_up(fl);

		/*
		 * The setting of flc_blocker to NULL marks the "done"
		 * point in deleting a block. Paired with acquire at the top
		 * of locks_delete_block().
		 */
		smp_store_release(&waiter->flc_blocker, NULL);
	}
}

/**
 *	locks_delete_block - stop waiting for a file lock
 *	@waiter_fl: the lock which was waiting
 *
 *	lockd/nfsd need to disconnect the lock while working on it.
 */
int locks_delete_block(struct file_lock *waiter_fl)
{
	int status = -ENOENT;
	struct file_lock_core *waiter = &waiter_fl->c;

	/*
	 * If fl_blocker is NULL, it won't be set again as this thread "owns"
	 * the lock and is the only one that might try to claim the lock.
	 *
	 * We use acquire/release to manage fl_blocker so that we can
	 * optimize away taking the blocked_lock_lock in many cases.
	 *
	 * The smp_load_acquire guarantees two things:
	 *
	 * 1/ that fl_blocked_requests can be tested locklessly. If something
	 * was recently added to that list it must have been in a locked region
	 * *before* the locked region when fl_blocker was set to NULL.
	 *
	 * 2/ that no other thread is accessing 'waiter', so it is safe to free
	 * it.  __locks_wake_up_blocks is careful not to touch waiter after
	 * fl_blocker is released.
	 *
	 * If a lockless check of fl_blocker shows it to be NULL, we know that
	 * no new locks can be inserted into its fl_blocked_requests list, and
	 * can avoid doing anything further if the list is empty.
	 */
	if (!smp_load_acquire(&waiter->flc_blocker) &&
	    list_empty(&waiter->flc_blocked_requests))
		return status;

	spin_lock(&blocked_lock_lock);
	if (waiter->flc_blocker)
		status = 0;
	__locks_wake_up_blocks(waiter);
	__locks_delete_block(waiter);

	/*
	 * The setting of fl_blocker to NULL marks the "done" point in deleting
	 * a block. Paired with acquire at the top of this function.
	 */
	smp_store_release(&waiter->flc_blocker, NULL);
	spin_unlock(&blocked_lock_lock);
	return status;
}
EXPORT_SYMBOL(locks_delete_block);

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 *
 * Must be called with both the flc_lock and blocked_lock_lock held. The
 * fl_blocked_requests list itself is protected by the blocked_lock_lock,
 * but by ensuring that the flc_lock is also held on insertions we can avoid
 * taking the blocked_lock_lock in some cases when we see that the
 * fl_blocked_requests list is empty.
 *
 * Rather than just adding to the list, we check for conflicts with any existing
 * waiters, and add beneath any waiter that blocks the new waiter.
 * Thus wakeups don't happen until needed.
 */
static void __locks_insert_block(struct file_lock *blocker_fl,
				 struct file_lock *waiter_fl,
				 bool conflict(struct file_lock_core *,
					       struct file_lock_core *))
{
	struct file_lock_core *blocker = &blocker_fl->c;
	struct file_lock_core *waiter = &waiter_fl->c;
	struct file_lock_core *flc;

	BUG_ON(!list_empty(&waiter->flc_blocked_member));
new_blocker:
	list_for_each_entry(flc, &blocker->flc_blocked_requests, flc_blocked_member)
		if (conflict(flc, waiter)) {
			blocker = flc;
			goto new_blocker;
		}
	waiter->flc_blocker = blocker;
	list_add_tail(&waiter->flc_blocked_member,
		      &blocker->flc_blocked_requests);

	if ((blocker->flc_flags & (FL_POSIX|FL_OFDLCK)) == (FL_POSIX|FL_OFDLCK))
		locks_insert_global_blocked(waiter);

	/* The requests in waiter->flc_blocked_requests are known to conflict
	 * with waiter, but might not conflict with blocker, or the requests
	 * and locks which block it.  So they all need to be woken.
	 */
	__locks_wake_up_blocks(waiter);
}

/* Must be called with flc_lock held. */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter,
			       bool conflict(struct file_lock_core *,
					     struct file_lock_core *))
{
	spin_lock(&blocked_lock_lock);
	__locks_insert_block(blocker, waiter, conflict);
	spin_unlock(&blocked_lock_lock);
}

/*
 * Wake up processes blocked waiting for blocker.
 *
 * Must be called with the inode->flc_lock held!
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	/*
	 * Avoid taking global lock if list is empty. This is safe since new
	 * blocked requests are only added to the list under the flc_lock, and
	 * the flc_lock is always held here. Note that removal from the
	 * fl_blocked_requests list does not require the flc_lock, so we must
	 * recheck list_empty() after acquiring the blocked_lock_lock.
	 */
	if (list_empty(&blocker->c.flc_blocked_requests))
		return;

	spin_lock(&blocked_lock_lock);
	__locks_wake_up_blocks(&blocker->c);
	spin_unlock(&blocked_lock_lock);
}

static void
locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
{
	list_add_tail(&fl->c.flc_list, before);
	locks_insert_global_locks(&fl->c);
}

static void
locks_unlink_lock_ctx(struct file_lock *fl)
{
	locks_delete_global_locks(&fl->c);
	list_del_init(&fl->c.flc_list);
	locks_wake_up_blocks(fl);
}

static void
locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
{
	locks_unlink_lock_ctx(fl);
	if (dispose)
		list_add(&fl->c.flc_list, dispose);
	else
		locks_free_lock(fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static bool locks_conflict(struct file_lock_core *caller_flc,
			   struct file_lock_core *sys_flc)
{
	if (sys_flc->flc_type == F_WRLCK)
		return true;
	if (caller_flc->flc_type == F_WRLCK)
		return true;
	return false;
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX-specific
 * checking before calling locks_conflict().
 */
static bool posix_locks_conflict(struct file_lock_core *caller_flc,
				 struct file_lock_core *sys_flc)
{
	struct file_lock *caller_fl = file_lock(caller_flc);
	struct file_lock *sys_fl = file_lock(sys_flc);

	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (posix_same_owner(caller_flc, sys_flc))
		return false;

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return false;

	return locks_conflict(caller_flc, sys_flc);
}

/* Determine if lock sys_fl blocks lock caller_fl. Used on the xx_GETLK
 * path, so it checks for additional GETLK-specific things like F_UNLCK.
 */
static bool posix_test_locks_conflict(struct file_lock *caller_fl,
				      struct file_lock *sys_fl)
{
	struct file_lock_core *caller = &caller_fl->c;
	struct file_lock_core *sys = &sys_fl->c;

	/* An F_UNLCK request checks for overlapping locks held by the
	 * same owner.
	 */
	if (lock_is_unlock(caller_fl)) {
		if (!posix_same_owner(caller, sys))
			return false;
		return locks_overlap(caller_fl, sys_fl);
	}
	return posix_locks_conflict(caller, sys);
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK-specific
 * checking before calling locks_conflict().
 */
static bool flock_locks_conflict(struct file_lock_core *caller_flc,
				 struct file_lock_core *sys_flc)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (caller_flc->flc_file == sys_flc->flc_file)
		return false;

	return locks_conflict(caller_flc, sys_flc);
}

void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;
	struct file_lock_context *ctx;
	struct inode *inode = file_inode(filp);
	void *owner;
	void (*func)(void);

	ctx = locks_inode_context(inode);
	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
		fl->c.flc_type = F_UNLCK;
		return;
	}

retry:
	spin_lock(&ctx->flc_lock);
	list_for_each_entry(cfl, &ctx->flc_posix, c.flc_list) {
		if (!posix_test_locks_conflict(fl, cfl))
			continue;
		if (cfl->fl_lmops && cfl->fl_lmops->lm_lock_expirable
			&& (*cfl->fl_lmops->lm_lock_expirable)(cfl)) {
			owner = cfl->fl_lmops->lm_mod_owner;
			func = cfl->fl_lmops->lm_expire_lock;
			__module_get(owner);
			spin_unlock(&ctx->flc_lock);
			(*func)();
			module_put(owner);
			goto retry;
		}
		locks_copy_conflock(fl, cfl);
		goto out;
	}
	fl->c.flc_type = F_UNLCK;
out:
	spin_unlock(&ctx->flc_lock);
	return;
}
EXPORT_SYMBOL(posix_test_lock);
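
/*
 * Sketch of the userspace side (illustrative; fd and range are
 * assumptions). On filesystems without a ->lock operation, an F_GETLK
 * request reaches posix_test_lock() via vfs_test_lock():
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 100,
 *	};
 *	fcntl(fd, F_GETLK, &fl);
 *	// fl.l_type is now F_UNLCK if [0, 99] could be locked; otherwise
 *	// fl describes one conflicting lock (copied via locks_copy_conflock()).
 */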

/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
 *
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock.  That lock in turn may be held by
 * someone waiting for at most one other lock.  Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
 *
 * Since we do this before we ever put a process to sleep on a lock, we
 * are ensured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
 *
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 * To handle those cases, we just bail out after a few iterations.
 *
 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
 * Because the owner is not even nominally tied to a thread of
 * execution, the deadlock detection below can't reasonably work well. Just
 * skip it for those.
 *
 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
 * locks that just checks for the case where two tasks are attempting to
 * upgrade from read to write locks on the same inode.
 */
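
/*
 * A minimal scenario the detector below catches (owners and offsets
 * assumed): task A holds a write lock on byte 0 and waits for byte 1,
 * while task B holds a write lock on byte 1 and now requests byte 0.
 * Following what_owner_is_waiting_for() from B's blocker (A) leads to a
 * lock owned by B itself, so posix_locks_deadlock() returns true and B's
 * request fails with -EDEADLK instead of sleeping forever.
 */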

#define MAX_DEADLK_ITERATIONS 10

/* Find a lock that the owner of the given @blocker is blocking on. */
static struct file_lock_core *what_owner_is_waiting_for(struct file_lock_core *blocker)
{
	struct file_lock_core *flc;

	hash_for_each_possible(blocked_hash, flc, flc_link, posix_owner_key(blocker)) {
		if (posix_same_owner(flc, blocker)) {
			while (flc->flc_blocker)
				flc = flc->flc_blocker;
			return flc;
		}
	}
	return NULL;
}

/* Must be called with the blocked_lock_lock held! */
static bool posix_locks_deadlock(struct file_lock *caller_fl,
				 struct file_lock *block_fl)
{
	struct file_lock_core *caller = &caller_fl->c;
	struct file_lock_core *blocker = &block_fl->c;
	int i = 0;

	lockdep_assert_held(&blocked_lock_lock);

	/*
	 * This deadlock detector can't reasonably detect deadlocks with
	 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
	 */
	if (caller->flc_flags & FL_OFDLCK)
		return false;

	while ((blocker = what_owner_is_waiting_for(blocker))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return false;
		if (posix_same_owner(caller, blocker))
			return true;
	}
	return false;
}

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_inode(struct inode *inode, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock *fl;
	struct file_lock_context *ctx;
	int error = 0;
	bool found = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->c.flc_type);
	if (!ctx) {
		if (request->c.flc_type != F_UNLCK)
			return -ENOMEM;
		return (request->c.flc_flags & FL_EXISTS) ? -ENOENT : 0;
	}

	if (!(request->c.flc_flags & FL_ACCESS) && (request->c.flc_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	if (request->c.flc_flags & FL_ACCESS)
		goto find_conflict;

	list_for_each_entry(fl, &ctx->flc_flock, c.flc_list) {
		if (request->c.flc_file != fl->c.flc_file)
			continue;
		if (request->c.flc_type == fl->c.flc_type)
			goto out;
		found = true;
		locks_delete_lock_ctx(fl, &dispose);
		break;
	}

	if (lock_is_unlock(request)) {
		if ((request->c.flc_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

find_conflict:
	list_for_each_entry(fl, &ctx->flc_flock, c.flc_list) {
		if (!flock_locks_conflict(&request->c, &fl->c))
			continue;
		error = -EAGAIN;
		if (!(request->c.flc_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request, flock_locks_conflict);
		goto out;
	}
	if (request->c.flc_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_move_blocks(new_fl, request);
	locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
	new_fl = NULL;
	error = 0;

out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	if (new_fl)
		locks_free_lock(new_fl);
	locks_dispose_list(&dispose);
	trace_flock_lock_inode(inode, request, error);
	return error;
}

static int posix_lock_inode(struct inode *inode, struct file_lock *request,
			    struct file_lock *conflock)
{
	struct file_lock *fl, *tmp;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock_context *ctx;
	int error;
	bool added = false;
	LIST_HEAD(dispose);
	void *owner;
	void (*func)(void);

	ctx = locks_get_lock_context(inode, request->c.flc_type);
	if (!ctx)
		return lock_is_unlock(request) ? 0 : -ENOMEM;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure that no new locks will be needed.
	 */
	if (!(request->c.flc_flags & FL_ACCESS) &&
	    (request->c.flc_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

retry:
	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/*
	 * New lock request. Walk all POSIX locks and look for conflicts. If
	 * there are any, either return error or put the request on the
	 * blocker's list of waiters and the global blocked_hash.
	 */
	if (request->c.flc_type != F_UNLCK) {
		list_for_each_entry(fl, &ctx->flc_posix, c.flc_list) {
			if (!posix_locks_conflict(&request->c, &fl->c))
				continue;
			if (fl->fl_lmops && fl->fl_lmops->lm_lock_expirable
				&& (*fl->fl_lmops->lm_lock_expirable)(fl)) {
				owner = fl->fl_lmops->lm_mod_owner;
				func = fl->fl_lmops->lm_expire_lock;
				__module_get(owner);
				spin_unlock(&ctx->flc_lock);
				percpu_up_read(&file_rwsem);
				(*func)();
				module_put(owner);
				goto retry;
			}
			if (conflock)
				locks_copy_conflock(conflock, fl);
			error = -EAGAIN;
			if (!(request->c.flc_flags & FL_SLEEP))
				goto out;
			/*
			 * Deadlock detection and insertion into the blocked
			 * locks list must be done while holding the same lock!
			 */
			error = -EDEADLK;
			spin_lock(&blocked_lock_lock);
			/*
			 * Ensure that we don't find any locks blocked on this
			 * request during deadlock detection.
			 */
			__locks_wake_up_blocks(&request->c);
			if (likely(!posix_locks_deadlock(request, fl))) {
				error = FILE_LOCK_DEFERRED;
				__locks_insert_block(fl, request,
						     posix_locks_conflict);
			}
			spin_unlock(&blocked_lock_lock);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->c.flc_flags & FL_ACCESS)
		goto out;

	/* Find the first old lock with the same owner as the new lock */
	list_for_each_entry(fl, &ctx->flc_posix, c.flc_list) {
		if (posix_same_owner(&request->c, &fl->c))
			break;
	}

	/* Process locks with this owner. */
	list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, c.flc_list) {
		if (!posix_same_owner(&request->c, &fl->c))
			break;

		/* Detect adjacent or overlapping regions (if same lock type) */
		if (request->c.flc_type == fl->c.flc_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				continue;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock_ctx(fl, &dispose);
				continue;
			}
			request = fl;
			added = true;
		} else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				continue;
			if (fl->fl_start > request->fl_end)
				break;
			if (lock_is_unlock(request))
				added = true;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock_ctx(fl, &dispose);
					continue;
				}
				/*
				 * Replace the old lock with new_fl, and
				 * remove the old one. It's safe to do the
				 * insert here since we know that we won't be
				 * using new_fl later, and that the lock is
				 * just replacing an existing lock.
				 */
				error = -ENOLCK;
				if (!new_fl)
					goto out;
				locks_copy_lock(new_fl, request);
				locks_move_blocks(new_fl, request);
				request = new_fl;
				new_fl = NULL;
				locks_insert_lock_ctx(request,
						      &fl->c.flc_list);
				locks_delete_lock_ctx(fl, &dispose);
				added = true;
			}
		}
	}

	/*
	 * The above code only modifies existing locks in case of merging or
	 * replacing. If new lock(s) need to be inserted all modifications are
	 * done below this, so it's still safe to bail out here.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (lock_is_unlock(request)) {
			if (request->c.flc_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_move_blocks(new_fl, request);
		locks_insert_lock_ctx(new_fl, &fl->c.flc_list);
		fl = new_fl;
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock_ctx(left, &fl->c.flc_list);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
 out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	trace_posix_lock_inode(inode, request, error);
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	locks_dispose_list(&dispose);

	return error;
}

/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return posix_lock_inode(file_inode(filp), fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
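
/*
 * Userspace sketch (illustrative; fd and range are assumptions): a
 * blocking request such as
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 42,
 *		.l_len    = 1,
 *	};
 *	fcntl(fd, F_SETLKW, &fl);	// sleeps until [42, 42] is grantable
 *
 * reaches posix_lock_file() via fcntl_setlk()/vfs_lock_file() on
 * filesystems without a ->lock operation, with FL_SLEEP set so that a
 * conflict returns FILE_LOCK_DEFERRED rather than -EAGAIN.
 */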

/**
 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
 * @inode: inode of file to which lock request should be applied
 * @fl: The lock to be applied
 *
 * Apply a POSIX style lock request to an inode.
 */
static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = posix_lock_inode(inode, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->c.flc_wait,
						 list_empty(&fl->c.flc_blocked_member));
		if (error)
			break;
	}
	locks_delete_block(fl);
	return error;
}

static void lease_clear_pending(struct file_lock *fl, int arg)
{
	switch (arg) {
	case F_UNLCK:
		fl->c.flc_flags &= ~FL_UNLOCK_PENDING;
		fallthrough;
	case F_RDLCK:
		fl->c.flc_flags &= ~FL_DOWNGRADE_PENDING;
	}
}

/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
{
	int error = assign_type(fl, arg);

	if (error)
		return error;
	lease_clear_pending(fl, arg);
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK) {
		struct file *filp = fl->c.flc_file;

		f_delown(filp);
		filp->f_owner.signum = 0;
		fasync_helper(0, fl->c.flc_file, 0, &fl->fl_fasync);
		if (fl->fl_fasync != NULL) {
			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
			fl->fl_fasync = NULL;
		}
		locks_delete_lock_ctx(fl, dispose);
	}
	return 0;
}
EXPORT_SYMBOL(lease_modify);

static bool past_time(unsigned long then)
{
	if (!then)
		/* 0 is a special value meaning "this never expires": */
		return false;
	return time_after(jiffies, then);
}

static void time_out_leases(struct inode *inode, struct list_head *dispose)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl, *tmp;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, c.flc_list) {
		trace_time_out_leases(inode, fl);
		if (past_time(fl->fl_downgrade_time))
			lease_modify(fl, F_RDLCK, dispose);
		if (past_time(fl->fl_break_time))
			lease_modify(fl, F_UNLCK, dispose);
	}
}

static bool leases_conflict(struct file_lock_core *lc, struct file_lock_core *bc)
{
	bool rc;
	struct file_lock *lease = file_lock(lc);
	struct file_lock *breaker = file_lock(bc);

	if (lease->fl_lmops->lm_breaker_owns_lease
			&& lease->fl_lmops->lm_breaker_owns_lease(lease))
		return false;
	if ((bc->flc_flags & FL_LAYOUT) != (lc->flc_flags & FL_LAYOUT)) {
		rc = false;
		goto trace;
	}
	if ((bc->flc_flags & FL_DELEG) && (lc->flc_flags & FL_LEASE)) {
		rc = false;
		goto trace;
	}

	rc = locks_conflict(bc, lc);
trace:
	trace_leases_conflict(rc, lease, breaker);
	return rc;
}

static bool
any_leases_conflict(struct inode *inode, struct file_lock *breaker)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock_core *flc;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry(flc, &ctx->flc_lease, flc_list) {
		if (leases_conflict(flc, &breaker->c))
			return true;
	}
	return false;
}

/**
 *	__break_lease	-	revoke all outstanding leases on file
 *	@inode: the inode of the file to return
 *	@mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
 *	    break all leases
 *	@type: FL_LEASE: break leases and delegations; FL_DELEG: break
 *	    only delegations
 *
 *	break_lease (inlined for speed) has checked there already is at least
 *	some kind of lock (maybe a lease) on this file.  Leases are broken on
 *	a call to open() or truncate().  This function can sleep unless you
 *	specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
	int error = 0;
	struct file_lock_context *ctx;
	struct file_lock *new_fl, *fl, *tmp;
	unsigned long break_time;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;
	LIST_HEAD(dispose);

	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
	if (IS_ERR(new_fl))
		return PTR_ERR(new_fl);
	new_fl->c.flc_flags = type;

	/* typically we will check that ctx is non-NULL before calling */
	ctx = locks_inode_context(inode);
	if (!ctx) {
		WARN_ON_ONCE(1);
		goto free_lock;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);

	time_out_leases(inode, &dispose);

	if (!any_leases_conflict(inode, new_fl))
		goto out;

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, c.flc_list) {
		if (!leases_conflict(&fl->c, &new_fl->c))
			continue;
		if (want_write) {
			if (fl->c.flc_flags & FL_UNLOCK_PENDING)
				continue;
			fl->c.flc_flags |= FL_UNLOCK_PENDING;
			fl->fl_break_time = break_time;
		} else {
			if (lease_breaking(fl))
				continue;
			fl->c.flc_flags |= FL_DOWNGRADE_PENDING;
			fl->fl_downgrade_time = break_time;
		}
		if (fl->fl_lmops->lm_break(fl))
			locks_delete_lock_ctx(fl, &dispose);
	}

	if (list_empty(&ctx->flc_lease))
		goto out;

	if (mode & O_NONBLOCK) {
		trace_break_lease_noblock(inode, new_fl);
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	fl = list_first_entry(&ctx->flc_lease, struct file_lock, c.flc_list);
	break_time = fl->fl_break_time;
	if (break_time != 0)
		break_time -= jiffies;
	if (break_time == 0)
		break_time++;
	locks_insert_block(fl, new_fl, leases_conflict);
	trace_break_lease_block(inode, new_fl);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);

	locks_dispose_list(&dispose);
	error = wait_event_interruptible_timeout(new_fl->c.flc_wait,
						 list_empty(&new_fl->c.flc_blocked_member),
						 break_time);

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	trace_break_lease_unblock(inode, new_fl);
	locks_delete_block(new_fl);
	if (error >= 0) {
		/*
		 * Wait for the next conflicting lease that has not been
		 * broken yet
		 */
		if (error == 0)
			time_out_leases(inode, &dispose);
		if (any_leases_conflict(inode, new_fl))
			goto restart;
		error = 0;
	}
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
free_lock:
	locks_free_lock(new_fl);
	return error;
}
EXPORT_SYMBOL(__break_lease);

/**
 *	lease_get_mtime - update modified time of an inode with exclusive lease
 *	@inode: the inode
 *	@time:	pointer to a timespec which contains the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases.  The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec64 *time)
{
	bool has_lease = false;
	struct file_lock_context *ctx;
	struct file_lock *fl;

	ctx = locks_inode_context(inode);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		spin_lock(&ctx->flc_lock);
		fl = list_first_entry_or_null(&ctx->flc_lease,
					      struct file_lock, c.flc_list);
		if (fl && lock_is_write(fl))
			has_lease = true;
		spin_unlock(&ctx->flc_lock);
	}

	if (has_lease)
		*time = current_time(inode);
}
EXPORT_SYMBOL(lease_get_mtime);

/**
 *	fcntl_getlease - Enquire what lease is currently active
 *	@filp: the file
 *
 *	The value returned by this function will be one of
 *	(if no lease break is pending):
 *
 *	%F_RDLCK to indicate a shared lease is held.
 *
 *	%F_WRLCK to indicate an exclusive lease is held.
 *
 *	%F_UNLCK to indicate no lease is held.
 *
 *	(if a lease break is pending):
 *
 *	%F_RDLCK to indicate an exclusive lease needs to be
 *		changed to a shared lease (or removed).
 *
 *	%F_UNLCK to indicate the lease needs to be removed.
 *
 *	XXX: sfr & willy disagree over whether F_INPROGRESS
 *	should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	struct inode *inode = file_inode(filp);
	struct file_lock_context *ctx;
	int type = F_UNLCK;
	LIST_HEAD(dispose);

	ctx = locks_inode_context(inode);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		percpu_down_read(&file_rwsem);
		spin_lock(&ctx->flc_lock);
		time_out_leases(inode, &dispose);
		list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) {
			if (fl->c.flc_file != filp)
				continue;
			type = target_leasetype(fl);
			break;
		}
		spin_unlock(&ctx->flc_lock);
		percpu_up_read(&file_rwsem);

		locks_dispose_list(&dispose);
	}
	return type;
}
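
/*
 * Userspace view of the above (illustrative; the fd is an assumption):
 *
 *	fcntl(fd, F_SETLEASE, F_RDLCK);		// take a read lease
 *	...
 *	int type = fcntl(fd, F_GETLEASE);	// F_RDLCK, F_WRLCK or F_UNLCK
 *
 * While a break is pending, F_GETLEASE reports the type the lease is
 * being downgraded to, per target_leasetype() above.
 */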

/**
 * check_conflicting_open - see if the given file points to an inode that has
 *			    an existing open that would conflict with the
 *			    desired lease.
 * @filp:	file to check
 * @arg:	type of lease that we're trying to acquire
 * @flags:	current lock flags
 *
 * Check to see if there's an existing open fd on this file that would
 * conflict with the lease we're trying to set.
 */
static int
check_conflicting_open(struct file *filp, const int arg, int flags)
{
	struct inode *inode = file_inode(filp);
	int self_wcount = 0, self_rcount = 0;

	if (flags & FL_LAYOUT)
		return 0;
	if (flags & FL_DELEG)
		/* We leave these checks to the caller */
		return 0;

	if (arg == F_RDLCK)
		return inode_is_open_for_write(inode) ? -EAGAIN : 0;
	else if (arg != F_WRLCK)
		return 0;

	/*
	 * Make sure that only the read/write counts come from the lease
	 * requestor.
	 * Note that this will result in denying write leases when i_writecount
	 * is negative, which is what we want.  (We shouldn't grant write leases
	 * on files open for execution.)
	 */
	if (filp->f_mode & FMODE_WRITE)
		self_wcount = 1;
	else if (filp->f_mode & FMODE_READ)
		self_rcount = 1;

	if (atomic_read(&inode->i_writecount) != self_wcount ||
	    atomic_read(&inode->i_readcount) != self_rcount)
		return -EAGAIN;

	return 0;
}

static int
generic_add_lease(struct file *filp, int arg, struct file_lock **flp, void **priv)
{
	struct file_lock *fl, *my_fl = NULL, *lease;
	struct inode *inode = file_inode(filp);
	struct file_lock_context *ctx;
	bool is_deleg = (*flp)->c.flc_flags & FL_DELEG;
	int error;
	LIST_HEAD(dispose);

	lease = *flp;
	trace_generic_add_lease(inode, lease);

	/* Note that arg is never F_UNLCK here */
	ctx = locks_get_lock_context(inode, arg);
	if (!ctx)
		return -ENOMEM;

	/*
	 * In the delegation case we need mutual exclusion with
	 * a number of operations that take the i_mutex.  We trylock
	 * because delegations are an optional optimization, and if
	 * there's some chance of a conflict we'd rather not bother;
	 * maybe that's a sign this just isn't a good file to
	 * hand out a delegation on.
	 */
1765 	if (is_deleg && !inode_trylock(inode))
1766 		return -EAGAIN;
1767 
1768 	percpu_down_read(&file_rwsem);
1769 	spin_lock(&ctx->flc_lock);
1770 	time_out_leases(inode, &dispose);
1771 	error = check_conflicting_open(filp, arg, lease->c.flc_flags);
1772 	if (error)
1773 		goto out;
1774 
1775 	/*
1776 	 * At this point, we know that if there is an exclusive
1777 	 * lease on this file, then we hold it on this filp
1778 	 * (otherwise our open of this file would have blocked).
1779 	 * And if we are trying to acquire an exclusive lease,
1780 	 * then the file is not open by anyone (including us)
1781 	 * except for this filp.
1782 	 */
1783 	error = -EAGAIN;
1784 	list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) {
1785 		if (fl->c.flc_file == filp &&
1786 		    fl->c.flc_owner == lease->c.flc_owner) {
1787 			my_fl = fl;
1788 			continue;
1789 		}
1790 
1791 		/*
1792 		 * No exclusive leases if someone else has a lease on
1793 		 * this file:
1794 		 */
1795 		if (arg == F_WRLCK)
1796 			goto out;
		/*
		 * Modifying our existing lease is OK, but no taking a
		 * new lease while someone else's is being broken by an
		 * open for write:
		 */
1801 		if (fl->c.flc_flags & FL_UNLOCK_PENDING)
1802 			goto out;
1803 	}
1804 
1805 	if (my_fl != NULL) {
1806 		lease = my_fl;
1807 		error = lease->fl_lmops->lm_change(lease, arg, &dispose);
1808 		if (error)
1809 			goto out;
1810 		goto out_setup;
1811 	}
1812 
1813 	error = -EINVAL;
1814 	if (!leases_enable)
1815 		goto out;
1816 
1817 	locks_insert_lock_ctx(lease, &ctx->flc_lease);
1818 	/*
1819 	 * The check in break_lease() is lockless. It's possible for another
1820 	 * open to race in after we did the earlier check for a conflicting
1821 	 * open but before the lease was inserted. Check again for a
1822 	 * conflicting open and cancel the lease if there is one.
1823 	 *
1824 	 * We also add a barrier here to ensure that the insertion of the lock
1825 	 * precedes these checks.
1826 	 */
1827 	smp_mb();
1828 	error = check_conflicting_open(filp, arg, lease->c.flc_flags);
1829 	if (error) {
1830 		locks_unlink_lock_ctx(lease);
1831 		goto out;
1832 	}
1833 
1834 out_setup:
1835 	if (lease->fl_lmops->lm_setup)
1836 		lease->fl_lmops->lm_setup(lease, priv);
1837 out:
1838 	spin_unlock(&ctx->flc_lock);
1839 	percpu_up_read(&file_rwsem);
1840 	locks_dispose_list(&dispose);
1841 	if (is_deleg)
1842 		inode_unlock(inode);
1843 	if (!error && !my_fl)
1844 		*flp = NULL;
1845 	return error;
1846 }
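
/*
 * The window closed by the post-insertion re-check in generic_add_lease(),
 * sketched (assumption: CPU0 is the lease requestor, CPU1 performs a
 * racing, conflicting open):
 *
 *	CPU0				CPU1
 *	check_conflicting_open() OK
 *					bumps i_writecount
 *					break_lease(): no lease visible
 *	locks_insert_lock_ctx()
 *	smp_mb()
 *	check_conflicting_open() fails
 *	  -> unlink the lease, -EAGAIN
 *
 * Either CPU0's re-check sees CPU1's open, or CPU1's break_lease() sees
 * the inserted lease; the barrier ensures the two cannot both miss.
 */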
1847 
1848 static int generic_delete_lease(struct file *filp, void *owner)
1849 {
1850 	int error = -EAGAIN;
1851 	struct file_lock *fl, *victim = NULL;
1852 	struct inode *inode = file_inode(filp);
1853 	struct file_lock_context *ctx;
1854 	LIST_HEAD(dispose);
1855 
1856 	ctx = locks_inode_context(inode);
1857 	if (!ctx) {
1858 		trace_generic_delete_lease(inode, NULL);
1859 		return error;
1860 	}
1861 
1862 	percpu_down_read(&file_rwsem);
1863 	spin_lock(&ctx->flc_lock);
1864 	list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) {
1865 		if (fl->c.flc_file == filp &&
1866 		    fl->c.flc_owner == owner) {
1867 			victim = fl;
1868 			break;
1869 		}
1870 	}
1871 	trace_generic_delete_lease(inode, victim);
	if (victim)
		error = victim->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1874 	spin_unlock(&ctx->flc_lock);
1875 	percpu_up_read(&file_rwsem);
1876 	locks_dispose_list(&dispose);
1877 	return error;
1878 }
1879 
1880 /**
1881  *	generic_setlease	-	sets a lease on an open file
1882  *	@filp:	file pointer
1883  *	@arg:	type of lease to obtain
1884  *	@flp:	input - file_lock to use, output - file_lock inserted
1885  *	@priv:	private data for lm_setup (may be NULL if lm_setup
1886  *		doesn't require it)
1887  *
1888  *	The (input) flp->fl_lmops->lm_break function is required
1889  *	by break_lease().
1890  */
1891 int generic_setlease(struct file *filp, int arg, struct file_lock **flp,
1892 			void **priv)
1893 {
1894 	struct inode *inode = file_inode(filp);
1895 	vfsuid_t vfsuid = i_uid_into_vfsuid(file_mnt_idmap(filp), inode);
1896 	int error;
1897 
	if (!vfsuid_eq_kuid(vfsuid, current_fsuid()) && !capable(CAP_LEASE))
1899 		return -EACCES;
1900 	if (!S_ISREG(inode->i_mode))
1901 		return -EINVAL;
1902 	error = security_file_lock(filp, arg);
1903 	if (error)
1904 		return error;
1905 
1906 	switch (arg) {
1907 	case F_UNLCK:
1908 		return generic_delete_lease(filp, *priv);
1909 	case F_RDLCK:
1910 	case F_WRLCK:
1911 		if (!(*flp)->fl_lmops->lm_break) {
1912 			WARN_ON_ONCE(1);
1913 			return -ENOLCK;
1914 		}
1915 
1916 		return generic_add_lease(filp, arg, flp, priv);
1917 	default:
1918 		return -EINVAL;
1919 	}
1920 }
1921 EXPORT_SYMBOL(generic_setlease);
1922 
1923 /*
1924  * Kernel subsystems can register to be notified on any attempt to set
1925  * a new lease with the lease_notifier_chain. This is used by (e.g.) nfsd
1926  * to close files that it may have cached when there is an attempt to set a
1927  * conflicting lease.
1928  */
1929 static struct srcu_notifier_head lease_notifier_chain;
1930 
1931 static inline void
1932 lease_notifier_chain_init(void)
1933 {
1934 	srcu_init_notifier_head(&lease_notifier_chain);
1935 }
1936 
1937 static inline void
1938 setlease_notifier(int arg, struct file_lock *lease)
1939 {
1940 	if (arg != F_UNLCK)
1941 		srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
1942 }
1943 
1944 int lease_register_notifier(struct notifier_block *nb)
1945 {
1946 	return srcu_notifier_chain_register(&lease_notifier_chain, nb);
1947 }
1948 EXPORT_SYMBOL_GPL(lease_register_notifier);
1949 
1950 void lease_unregister_notifier(struct notifier_block *nb)
1951 {
1952 	srcu_notifier_chain_unregister(&lease_notifier_chain, nb);
1953 }
1954 EXPORT_SYMBOL_GPL(lease_unregister_notifier);
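
/*
 * A minimal sketch of a subsystem hooking the chain (hypothetical
 * callback and variable names; nfsd does something similar to close
 * cached files):
 *
 *	static int my_setlease_cb(struct notifier_block *nb,
 *				  unsigned long arg, void *data)
 *	{
 *		struct file_lock *lease = data;
 *
 *		// arg carries the requested lease type (F_RDLCK/F_WRLCK);
 *		// close any cached opens that would conflict with "lease"
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_setlease_nb = {
 *		.notifier_call = my_setlease_cb,
 *	};
 *
 *	lease_register_notifier(&my_setlease_nb);
 *	...
 *	lease_unregister_notifier(&my_setlease_nb);
 */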
1955 
1956 /**
1957  * vfs_setlease        -       sets a lease on an open file
1958  * @filp:	file pointer
1959  * @arg:	type of lease to obtain
1960  * @lease:	file_lock to use when adding a lease
1961  * @priv:	private info for lm_setup when adding a lease (may be
1962  *		NULL if lm_setup doesn't require it)
1963  *
1964  * Call this to establish a lease on the file. The "lease" argument is not
1965  * used for F_UNLCK requests and may be NULL. For commands that set or alter
1966  * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be
1967  * set; if not, this function will return -ENOLCK (and generate a scary-looking
1968  * stack trace).
1969  *
1970  * The "priv" pointer is passed directly to the lm_setup function as-is. It
1971  * may be NULL if the lm_setup operation doesn't require it.
1972  */
1973 int
1974 vfs_setlease(struct file *filp, int arg, struct file_lock **lease, void **priv)
1975 {
1976 	if (lease)
1977 		setlease_notifier(arg, *lease);
1978 	if (filp->f_op->setlease)
1979 		return filp->f_op->setlease(filp, arg, lease, priv);
1980 	else
1981 		return generic_setlease(filp, arg, lease, priv);
1982 }
1983 EXPORT_SYMBOL_GPL(vfs_setlease);
1984 
1985 static int do_fcntl_add_lease(unsigned int fd, struct file *filp, int arg)
1986 {
1987 	struct file_lock *fl;
1988 	struct fasync_struct *new;
1989 	int error;
1990 
1991 	fl = lease_alloc(filp, arg);
1992 	if (IS_ERR(fl))
1993 		return PTR_ERR(fl);
1994 
1995 	new = fasync_alloc();
1996 	if (!new) {
1997 		locks_free_lock(fl);
1998 		return -ENOMEM;
1999 	}
2000 	new->fa_fd = fd;
2001 
2002 	error = vfs_setlease(filp, arg, &fl, (void **)&new);
2003 	if (fl)
2004 		locks_free_lock(fl);
2005 	if (new)
2006 		fasync_free(new);
2007 	return error;
2008 }
2009 
2010 /**
2011  *	fcntl_setlease	-	sets a lease on an open file
2012  *	@fd: open file descriptor
2013  *	@filp: file pointer
2014  *	@arg: type of lease to obtain
2015  *
2016  *	Call this fcntl to establish a lease on the file.
2017  *	Note that you also need to call %F_SETSIG to
2018  *	receive a signal when the lease is broken.
2019  */
2020 int fcntl_setlease(unsigned int fd, struct file *filp, int arg)
2021 {
2022 	if (arg == F_UNLCK)
2023 		return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
2024 	return do_fcntl_add_lease(fd, filp, arg);
2025 }
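
/*
 * Illustrative userspace usage (assumptions: "fd" is an open descriptor
 * for a file the caller owns; "on_break" is a signal handler installed
 * elsewhere):
 *
 *	fcntl(fd, F_SETSIG, SIGRTMIN);	// signal to deliver on break
 *	fcntl(fd, F_SETLEASE, F_RDLCK);	// take a read lease
 *	...				// on_break fires when another
 *					// process opens for write
 *	fcntl(fd, F_SETLEASE, F_UNLCK);	// release the lease
 */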
2026 
2027 /**
2028  * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
2029  * @inode: inode of the file to apply to
2030  * @fl: The lock to be applied
2031  *
2032  * Apply a FLOCK style lock request to an inode.
2033  */
2034 static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2035 {
	int error;

	might_sleep();
2038 	for (;;) {
2039 		error = flock_lock_inode(inode, fl);
2040 		if (error != FILE_LOCK_DEFERRED)
2041 			break;
2042 		error = wait_event_interruptible(fl->c.flc_wait,
2043 						 list_empty(&fl->c.flc_blocked_member));
2044 		if (error)
2045 			break;
2046 	}
2047 	locks_delete_block(fl);
2048 	return error;
2049 }
2050 
2051 /**
2052  * locks_lock_inode_wait - Apply a lock to an inode
2053  * @inode: inode of the file to apply to
2054  * @fl: The lock to be applied
2055  *
2056  * Apply a POSIX or FLOCK style lock request to an inode.
2057  */
2058 int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2059 {
	int res = 0;

	switch (fl->c.flc_flags & (FL_POSIX|FL_FLOCK)) {
	case FL_POSIX:
		res = posix_lock_inode_wait(inode, fl);
		break;
	case FL_FLOCK:
		res = flock_lock_inode_wait(inode, fl);
		break;
	default:
		BUG();
	}
	return res;
2072 }
2073 EXPORT_SYMBOL(locks_lock_inode_wait);
2074 
2075 /**
 *	sys_flock - flock() system call.
2077  *	@fd: the file descriptor to lock.
2078  *	@cmd: the type of lock to apply.
2079  *
2080  *	Apply a %FL_FLOCK style lock to an open file descriptor.
2081  *	The @cmd can be one of:
2082  *
2083  *	- %LOCK_SH -- a shared lock.
2084  *	- %LOCK_EX -- an exclusive lock.
2085  *	- %LOCK_UN -- remove an existing lock.
 *	- %LOCK_MAND -- a 'mandatory' flock. (No longer supported.)
2087  *
2088  *	%LOCK_MAND support has been removed from the kernel.
2089  */
2090 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
2091 {
2092 	int can_sleep, error, type;
2093 	struct file_lock fl;
2094 	struct fd f;
2095 
2096 	/*
2097 	 * LOCK_MAND locks were broken for a long time in that they never
2098 	 * conflicted with one another and didn't prevent any sort of open,
2099 	 * read or write activity.
2100 	 *
2101 	 * Just ignore these requests now, to preserve legacy behavior, but
2102 	 * throw a warning to let people know that they don't actually work.
2103 	 */
2104 	if (cmd & LOCK_MAND) {
2105 		pr_warn_once("%s(%d): Attempt to set a LOCK_MAND lock via flock(2). This support has been removed and the request ignored.\n", current->comm, current->pid);
2106 		return 0;
2107 	}
2108 
2109 	type = flock_translate_cmd(cmd & ~LOCK_NB);
2110 	if (type < 0)
2111 		return type;
2112 
2113 	error = -EBADF;
2114 	f = fdget(fd);
2115 	if (!f.file)
2116 		return error;
2117 
2118 	if (type != F_UNLCK && !(f.file->f_mode & (FMODE_READ | FMODE_WRITE)))
2119 		goto out_putf;
2120 
2121 	flock_make_lock(f.file, &fl, type);
2122 
2123 	error = security_file_lock(f.file, fl.c.flc_type);
2124 	if (error)
2125 		goto out_putf;
2126 
2127 	can_sleep = !(cmd & LOCK_NB);
2128 	if (can_sleep)
2129 		fl.c.flc_flags |= FL_SLEEP;
2130 
2131 	if (f.file->f_op->flock)
2132 		error = f.file->f_op->flock(f.file,
2133 					    (can_sleep) ? F_SETLKW : F_SETLK,
2134 					    &fl);
2135 	else
2136 		error = locks_lock_file_wait(f.file, &fl);
2137 
2138 	locks_release_private(&fl);
2139  out_putf:
2140 	fdput(f);
2141 
2142 	return error;
2143 }
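
/*
 * Illustrative userspace usage (assumption: "fd" is any descriptor open
 * for reading or writing):
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK) {
 *		// someone else holds the lock; take it the blocking way
 *		flock(fd, LOCK_EX);
 *	}
 *	...
 *	flock(fd, LOCK_UN);
 */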
2144 
2145 /**
2146  * vfs_test_lock - test file byte range lock
2147  * @filp: The file to test lock for
2148  * @fl: The lock to test; also used to hold result
2149  *
 * Returns -ERRNO on failure.  Indicates the presence of a conflicting lock
 * by setting fl->c.flc_type to something other than F_UNLCK.
2152  */
2153 int vfs_test_lock(struct file *filp, struct file_lock *fl)
2154 {
2155 	WARN_ON_ONCE(filp != fl->c.flc_file);
2156 	if (filp->f_op->lock)
2157 		return filp->f_op->lock(filp, F_GETLK, fl);
2158 	posix_test_lock(filp, fl);
2159 	return 0;
2160 }
2161 EXPORT_SYMBOL_GPL(vfs_test_lock);
2162 
2163 /**
2164  * locks_translate_pid - translate a file_lock's fl_pid number into a namespace
 * @fl: The file_lock whose fl_pid should be translated
2166  * @ns: The namespace into which the pid should be translated
2167  *
2168  * Used to translate a fl_pid into a namespace virtual pid number
2169  */
2170 static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
2171 {
2172 	pid_t vnr;
2173 	struct pid *pid;
2174 
2175 	if (fl->c.flc_flags & FL_OFDLCK)
2176 		return -1;
2177 
2178 	/* Remote locks report a negative pid value */
2179 	if (fl->c.flc_pid <= 0)
2180 		return fl->c.flc_pid;
2181 
	/*
	 * If the lock owner process is dead and its pid has already been
	 * freed, the translation below won't work, but we still want to show
	 * the lock owner's pid number in the init pidns.
	 */
2187 	if (ns == &init_pid_ns)
2188 		return (pid_t) fl->c.flc_pid;
2189 
2190 	rcu_read_lock();
2191 	pid = find_pid_ns(fl->c.flc_pid, &init_pid_ns);
2192 	vnr = pid_nr_ns(pid, ns);
2193 	rcu_read_unlock();
2194 	return vnr;
2195 }
2196 
2197 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
2198 {
2199 	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2200 #if BITS_PER_LONG == 32
2201 	/*
2202 	 * Make sure we can represent the posix lock via
2203 	 * legacy 32bit flock.
2204 	 */
2205 	if (fl->fl_start > OFFT_OFFSET_MAX)
2206 		return -EOVERFLOW;
2207 	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
2208 		return -EOVERFLOW;
2209 #endif
2210 	flock->l_start = fl->fl_start;
2211 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2212 		fl->fl_end - fl->fl_start + 1;
2213 	flock->l_whence = 0;
2214 	flock->l_type = fl->c.flc_type;
2215 	return 0;
2216 }
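
/*
 * Worked example of the encoding above: an internal lock covering bytes
 * 100..199 becomes l_start = 100, l_len = 100; a lock whose fl_end is
 * OFFSET_MAX (extending to end of file) becomes l_len = 0, which is how
 * struct flock expresses an unbounded range.
 */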
2217 
2218 #if BITS_PER_LONG == 32
2219 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
2220 {
2221 	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2222 	flock->l_start = fl->fl_start;
2223 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2224 		fl->fl_end - fl->fl_start + 1;
2225 	flock->l_whence = 0;
2226 	flock->l_type = fl->c.flc_type;
2227 }
2228 #endif
2229 
/* Report the first existing lock that would conflict with the lock
 * described by @flock.  This implements the F_GETLK command of fcntl().
2232  */
2233 int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
2234 {
2235 	struct file_lock *fl;
2236 	int error;
2237 
2238 	fl = locks_alloc_lock();
2239 	if (fl == NULL)
2240 		return -ENOMEM;
2241 	error = -EINVAL;
2242 	if (cmd != F_OFD_GETLK && flock->l_type != F_RDLCK
2243 			&& flock->l_type != F_WRLCK)
2244 		goto out;
2245 
2246 	error = flock_to_posix_lock(filp, fl, flock);
2247 	if (error)
2248 		goto out;
2249 
2250 	if (cmd == F_OFD_GETLK) {
2251 		error = -EINVAL;
2252 		if (flock->l_pid != 0)
2253 			goto out;
2254 
2255 		fl->c.flc_flags |= FL_OFDLCK;
2256 		fl->c.flc_owner = filp;
2257 	}
2258 
2259 	error = vfs_test_lock(filp, fl);
2260 	if (error)
2261 		goto out;
2262 
2263 	flock->l_type = fl->c.flc_type;
2264 	if (fl->c.flc_type != F_UNLCK) {
2265 		error = posix_lock_to_flock(flock, fl);
2266 		if (error)
2267 			goto out;
2268 	}
2269 out:
2270 	locks_free_lock(fl);
2271 	return error;
2272 }
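
/*
 * Illustrative userspace usage of F_OFD_GETLK (assumption: "fd" is an
 * open descriptor; note that l_pid must be zero on input for the OFD
 * commands):
 *
 *	struct flock fl = {
 *		.l_type		= F_WRLCK,	// "would a write lock fit?"
 *		.l_whence	= SEEK_SET,
 *		.l_start	= 0,
 *		.l_len		= 0,		// whole file
 *	};
 *
 *	fcntl(fd, F_OFD_GETLK, &fl);
 *	if (fl.l_type != F_UNLCK) {
 *		// fl now describes a conflicting lock; for OFD locks
 *		// fl.l_pid is reported as -1
 *	}
 */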
2273 
2274 /**
2275  * vfs_lock_file - file byte range lock
2276  * @filp: The file to apply the lock to
2277  * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
2278  * @fl: The lock to be applied
2279  * @conf: Place to return a copy of the conflicting lock, if found.
2280  *
2281  * A caller that doesn't care about the conflicting lock may pass NULL
2282  * as the final argument.
2283  *
2284  * If the filesystem defines a private ->lock() method, then @conf will
2285  * be left unchanged; so a caller that cares should initialize it to
2286  * some acceptable default.
2287  *
2288  * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
2289  * locks, the ->lock() interface may return asynchronously, before the lock has
2290  * been granted or denied by the underlying filesystem, if (and only if)
 * lm_grant is set. Additionally, the EXPORT_OP_ASYNC_LOCK flag needs to be
 * set in the filesystem's export_operations.
 *
 * Callers expecting ->lock() to return asynchronously will only use F_SETLK,
 * not F_SETLKW; they will set FL_SLEEP if (and only if) the request is for a
 * blocking lock. When ->lock() does return asynchronously, it must return
 * FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock request completes.
 * If the request is for a non-blocking lock, the filesystem should return
 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
 * with the result. If the request timed out, the callback routine will return
 * a nonzero return code and the filesystem should release the lock. The
 * filesystem is also responsible for keeping a corresponding posix lock when
 * it grants a lock, so the VFS can find out which locks are locally held and
 * do the correct lock cleanup when required.
2305  * The underlying filesystem must not drop the kernel lock or call
2306  * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
2307  * return code.
2308  */
2309 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2310 {
2311 	WARN_ON_ONCE(filp != fl->c.flc_file);
2312 	if (filp->f_op->lock)
2313 		return filp->f_op->lock(filp, cmd, fl);
2314 	else
2315 		return posix_lock_file(filp, fl, conf);
2316 }
2317 EXPORT_SYMBOL_GPL(vfs_lock_file);
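
/*
 * A minimal sketch of the asynchronous ->lock() contract described above
 * (hypothetical filesystem; queue_remote_lock_request() and the reply
 * path are assumptions, not real kernel interfaces):
 *
 *	static int myfs_lock(struct file *filp, int cmd, struct file_lock *fl)
 *	{
 *		if (queue_remote_lock_request(filp, cmd, fl))
 *			return FILE_LOCK_DEFERRED;	// answer comes later
 *		return -ENOLCK;
 *	}
 *
 *	// later, from the reply handler, once the server has decided:
 *	fl->fl_lmops->lm_grant(fl, result);	// result == 0 on success
 */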
2318 
2319 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
2320 			     struct file_lock *fl)
2321 {
2322 	int error;
2323 
2324 	error = security_file_lock(filp, fl->c.flc_type);
2325 	if (error)
2326 		return error;
2327 
2328 	for (;;) {
2329 		error = vfs_lock_file(filp, cmd, fl, NULL);
2330 		if (error != FILE_LOCK_DEFERRED)
2331 			break;
2332 		error = wait_event_interruptible(fl->c.flc_wait,
2333 						 list_empty(&fl->c.flc_blocked_member));
2334 		if (error)
2335 			break;
2336 	}
2337 	locks_delete_block(fl);
2338 
2339 	return error;
2340 }
2341 
/* Ensure that fl->c.flc_file has a compatible f_mode for F_SETLK calls */
2343 static int
2344 check_fmode_for_setlk(struct file_lock *fl)
2345 {
2346 	switch (fl->c.flc_type) {
2347 	case F_RDLCK:
2348 		if (!(fl->c.flc_file->f_mode & FMODE_READ))
2349 			return -EBADF;
2350 		break;
2351 	case F_WRLCK:
2352 		if (!(fl->c.flc_file->f_mode & FMODE_WRITE))
2353 			return -EBADF;
2354 	}
2355 	return 0;
2356 }
2357 
/* Apply the lock described by @flock to an open file descriptor.
2359  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2360  */
2361 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
2362 		struct flock *flock)
2363 {
2364 	struct file_lock *file_lock = locks_alloc_lock();
2365 	struct inode *inode = file_inode(filp);
2366 	struct file *f;
2367 	int error;
2368 
2369 	if (file_lock == NULL)
2370 		return -ENOLCK;
2371 
2372 	error = flock_to_posix_lock(filp, file_lock, flock);
2373 	if (error)
2374 		goto out;
2375 
2376 	error = check_fmode_for_setlk(file_lock);
2377 	if (error)
2378 		goto out;
2379 
	/*
	 * If the cmd is requesting open file description (OFD) locks, then
	 * set the FL_OFDLCK flag and override the owner.
	 */
2384 	switch (cmd) {
2385 	case F_OFD_SETLK:
2386 		error = -EINVAL;
2387 		if (flock->l_pid != 0)
2388 			goto out;
2389 
2390 		cmd = F_SETLK;
2391 		file_lock->c.flc_flags |= FL_OFDLCK;
2392 		file_lock->c.flc_owner = filp;
2393 		break;
2394 	case F_OFD_SETLKW:
2395 		error = -EINVAL;
2396 		if (flock->l_pid != 0)
2397 			goto out;
2398 
2399 		cmd = F_SETLKW;
2400 		file_lock->c.flc_flags |= FL_OFDLCK;
2401 		file_lock->c.flc_owner = filp;
2402 		fallthrough;
2403 	case F_SETLKW:
2404 		file_lock->c.flc_flags |= FL_SLEEP;
2405 	}
2406 
2407 	error = do_lock_file_wait(filp, cmd, file_lock);
2408 
2409 	/*
2410 	 * Attempt to detect a close/fcntl race and recover by releasing the
2411 	 * lock that was just acquired. There is no need to do that when we're
2412 	 * unlocking though, or for OFD locks.
2413 	 */
2414 	if (!error && file_lock->c.flc_type != F_UNLCK &&
2415 	    !(file_lock->c.flc_flags & FL_OFDLCK)) {
2416 		struct files_struct *files = current->files;
		/*
		 * We need the spin_lock here: it prevents reordering between
		 * the update of i_flctx->flc_posix and the check for it done
		 * in close(). rcu_read_lock() wouldn't do.
		 */
2422 		spin_lock(&files->file_lock);
2423 		f = files_lookup_fd_locked(files, fd);
2424 		spin_unlock(&files->file_lock);
2425 		if (f != filp) {
2426 			file_lock->c.flc_type = F_UNLCK;
2427 			error = do_lock_file_wait(filp, cmd, file_lock);
2428 			WARN_ON_ONCE(error);
2429 			error = -EBADF;
2430 		}
2431 	}
2432 out:
2433 	trace_fcntl_setlk(inode, file_lock, error);
2434 	locks_free_lock(file_lock);
2435 	return error;
2436 }
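
/*
 * The close/fcntl race handled above, sketched (assumption: threads T1
 * and T2 share one files_struct):
 *
 *	T1: fcntl(fd, F_SETLK, ...)	T2: close(fd)
 *	do_lock_file_wait() grants
 *					fd table entry cleared;
 *					close() removes this task's
 *					POSIX locks, possibly missing
 *					the one just granted
 *	files_lookup_fd_locked(fd) != filp
 *	  -> release the lock, -EBADF
 */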
2437 
2438 #if BITS_PER_LONG == 32
/* Report the first existing lock that would conflict with the lock
 * described by @flock.  This implements the F_GETLK command of fcntl().
2441  */
2442 int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
2443 {
2444 	struct file_lock *fl;
2445 	int error;
2446 
2447 	fl = locks_alloc_lock();
2448 	if (fl == NULL)
2449 		return -ENOMEM;
2450 
2451 	error = -EINVAL;
2452 	if (cmd != F_OFD_GETLK && flock->l_type != F_RDLCK
2453 			&& flock->l_type != F_WRLCK)
2454 		goto out;
2455 
2456 	error = flock64_to_posix_lock(filp, fl, flock);
2457 	if (error)
2458 		goto out;
2459 
2460 	if (cmd == F_OFD_GETLK) {
2461 		error = -EINVAL;
2462 		if (flock->l_pid != 0)
2463 			goto out;
2464 
2465 		fl->c.flc_flags |= FL_OFDLCK;
2466 		fl->c.flc_owner = filp;
2467 	}
2468 
2469 	error = vfs_test_lock(filp, fl);
2470 	if (error)
2471 		goto out;
2472 
2473 	flock->l_type = fl->c.flc_type;
2474 	if (fl->c.flc_type != F_UNLCK)
2475 		posix_lock_to_flock64(flock, fl);
2476 
2477 out:
2478 	locks_free_lock(fl);
2479 	return error;
2480 }
2481 
/* Apply the lock described by @flock to an open file descriptor.
2483  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2484  */
2485 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2486 		struct flock64 *flock)
2487 {
2488 	struct file_lock *file_lock = locks_alloc_lock();
2489 	struct file *f;
2490 	int error;
2491 
2492 	if (file_lock == NULL)
2493 		return -ENOLCK;
2494 
2495 	error = flock64_to_posix_lock(filp, file_lock, flock);
2496 	if (error)
2497 		goto out;
2498 
2499 	error = check_fmode_for_setlk(file_lock);
2500 	if (error)
2501 		goto out;
2502 
	/*
	 * If the cmd is requesting open file description (OFD) locks, then
	 * set the FL_OFDLCK flag and override the owner.
	 */
2507 	switch (cmd) {
2508 	case F_OFD_SETLK:
2509 		error = -EINVAL;
2510 		if (flock->l_pid != 0)
2511 			goto out;
2512 
2513 		cmd = F_SETLK64;
2514 		file_lock->c.flc_flags |= FL_OFDLCK;
2515 		file_lock->c.flc_owner = filp;
2516 		break;
2517 	case F_OFD_SETLKW:
2518 		error = -EINVAL;
2519 		if (flock->l_pid != 0)
2520 			goto out;
2521 
2522 		cmd = F_SETLKW64;
2523 		file_lock->c.flc_flags |= FL_OFDLCK;
2524 		file_lock->c.flc_owner = filp;
2525 		fallthrough;
2526 	case F_SETLKW64:
2527 		file_lock->c.flc_flags |= FL_SLEEP;
2528 	}
2529 
2530 	error = do_lock_file_wait(filp, cmd, file_lock);
2531 
2532 	/*
2533 	 * Attempt to detect a close/fcntl race and recover by releasing the
2534 	 * lock that was just acquired. There is no need to do that when we're
2535 	 * unlocking though, or for OFD locks.
2536 	 */
2537 	if (!error && file_lock->c.flc_type != F_UNLCK &&
2538 	    !(file_lock->c.flc_flags & FL_OFDLCK)) {
2539 		struct files_struct *files = current->files;
		/*
		 * We need the spin_lock here: it prevents reordering between
		 * the update of i_flctx->flc_posix and the check for it done
		 * in close(). rcu_read_lock() wouldn't do.
		 */
2545 		spin_lock(&files->file_lock);
2546 		f = files_lookup_fd_locked(files, fd);
2547 		spin_unlock(&files->file_lock);
2548 		if (f != filp) {
2549 			file_lock->c.flc_type = F_UNLCK;
2550 			error = do_lock_file_wait(filp, cmd, file_lock);
2551 			WARN_ON_ONCE(error);
2552 			error = -EBADF;
2553 		}
2554 	}
2555 out:
2556 	locks_free_lock(file_lock);
2557 	return error;
2558 }
2559 #endif /* BITS_PER_LONG == 32 */
2560 
2561 /*
2562  * This function is called when the file is being removed
2563  * from the task's fd array.  POSIX locks belonging to this task
2564  * are deleted at this time.
2565  */
2566 void locks_remove_posix(struct file *filp, fl_owner_t owner)
2567 {
2568 	int error;
2569 	struct inode *inode = file_inode(filp);
2570 	struct file_lock lock;
2571 	struct file_lock_context *ctx;
2572 
2573 	/*
2574 	 * If there are no locks held on this file, we don't need to call
2575 	 * posix_lock_file().  Another process could be setting a lock on this
2576 	 * file at the same time, but we wouldn't remove that lock anyway.
2577 	 */
2578 	ctx = locks_inode_context(inode);
2579 	if (!ctx || list_empty(&ctx->flc_posix))
2580 		return;
2581 
2582 	locks_init_lock(&lock);
2583 	lock.c.flc_type = F_UNLCK;
2584 	lock.c.flc_flags = FL_POSIX | FL_CLOSE;
2585 	lock.fl_start = 0;
2586 	lock.fl_end = OFFSET_MAX;
2587 	lock.c.flc_owner = owner;
2588 	lock.c.flc_pid = current->tgid;
2589 	lock.c.flc_file = filp;
2590 	lock.fl_ops = NULL;
2591 	lock.fl_lmops = NULL;
2592 
2593 	error = vfs_lock_file(filp, F_SETLK, &lock, NULL);
2594 
2595 	if (lock.fl_ops && lock.fl_ops->fl_release_private)
2596 		lock.fl_ops->fl_release_private(&lock);
2597 	trace_locks_remove_posix(inode, &lock, error);
2598 }
2599 EXPORT_SYMBOL(locks_remove_posix);
2600 
2601 /* The i_flctx must be valid when calling into here */
2602 static void
2603 locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
2604 {
2605 	struct file_lock fl;
2606 	struct inode *inode = file_inode(filp);
2607 
2608 	if (list_empty(&flctx->flc_flock))
2609 		return;
2610 
2611 	flock_make_lock(filp, &fl, F_UNLCK);
2612 	fl.c.flc_flags |= FL_CLOSE;
2613 
2614 	if (filp->f_op->flock)
2615 		filp->f_op->flock(filp, F_SETLKW, &fl);
2616 	else
2617 		flock_lock_inode(inode, &fl);
2618 
2619 	if (fl.fl_ops && fl.fl_ops->fl_release_private)
2620 		fl.fl_ops->fl_release_private(&fl);
2621 }
2622 
2623 /* The i_flctx must be valid when calling into here */
2624 static void
2625 locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
2626 {
2627 	struct file_lock *fl, *tmp;
2628 	LIST_HEAD(dispose);
2629 
2630 	if (list_empty(&ctx->flc_lease))
2631 		return;
2632 
2633 	percpu_down_read(&file_rwsem);
2634 	spin_lock(&ctx->flc_lock);
2635 	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, c.flc_list)
2636 		if (filp == fl->c.flc_file)
2637 			lease_modify(fl, F_UNLCK, &dispose);
2638 	spin_unlock(&ctx->flc_lock);
2639 	percpu_up_read(&file_rwsem);
2640 
2641 	locks_dispose_list(&dispose);
2642 }
2643 
2644 /*
2645  * This function is called on the last close of an open file.
2646  */
2647 void locks_remove_file(struct file *filp)
2648 {
2649 	struct file_lock_context *ctx;
2650 
2651 	ctx = locks_inode_context(file_inode(filp));
2652 	if (!ctx)
2653 		return;
2654 
2655 	/* remove any OFD locks */
2656 	locks_remove_posix(filp, filp);
2657 
2658 	/* remove flock locks */
2659 	locks_remove_flock(filp, ctx);
2660 
2661 	/* remove any leases */
2662 	locks_remove_lease(filp, ctx);
2663 
2664 	spin_lock(&ctx->flc_lock);
2665 	locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
2666 	locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
2667 	locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
2668 	spin_unlock(&ctx->flc_lock);
2669 }
2670 
2671 /**
2672  * vfs_cancel_lock - file byte range unblock lock
2673  * @filp: The file to apply the unblock to
2674  * @fl: The lock to be unblocked
2675  *
2676  * Used by lock managers to cancel blocked requests
2677  */
2678 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2679 {
2680 	WARN_ON_ONCE(filp != fl->c.flc_file);
2681 	if (filp->f_op->lock)
2682 		return filp->f_op->lock(filp, F_CANCELLK, fl);
2683 	return 0;
2684 }
2685 EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2686 
2687 /**
2688  * vfs_inode_has_locks - are any file locks held on @inode?
2689  * @inode: inode to check for locks
2690  *
2691  * Return true if there are any FL_POSIX or FL_FLOCK locks currently
2692  * set on @inode.
2693  */
2694 bool vfs_inode_has_locks(struct inode *inode)
2695 {
2696 	struct file_lock_context *ctx;
2697 	bool ret;
2698 
2699 	ctx = locks_inode_context(inode);
2700 	if (!ctx)
2701 		return false;
2702 
2703 	spin_lock(&ctx->flc_lock);
2704 	ret = !list_empty(&ctx->flc_posix) || !list_empty(&ctx->flc_flock);
2705 	spin_unlock(&ctx->flc_lock);
2706 	return ret;
2707 }
2708 EXPORT_SYMBOL_GPL(vfs_inode_has_locks);
2709 
2710 #ifdef CONFIG_PROC_FS
2711 #include <linux/proc_fs.h>
2712 #include <linux/seq_file.h>
2713 
2714 struct locks_iterator {
2715 	int	li_cpu;
2716 	loff_t	li_pos;
2717 };
2718 
2719 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2720 			    loff_t id, char *pfx, int repeat)
2721 {
2722 	struct inode *inode = NULL;
2723 	unsigned int pid;
2724 	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
2725 	int type = fl->c.flc_type;
2726 
2727 	pid = locks_translate_pid(fl, proc_pidns);
	/*
	 * If the lock owner is dead (and its pid has been freed) or not
	 * visible in the current pidns, zero is shown as the pid value.
	 * Check the lock info from the init_pid_ns to get the saved lock
	 * pid value.
	 */
2733 
2734 	if (fl->c.flc_file != NULL)
2735 		inode = file_inode(fl->c.flc_file);
2736 
2737 	seq_printf(f, "%lld: ", id);
2738 
2739 	if (repeat)
2740 		seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx);
2741 
2742 	if (fl->c.flc_flags & FL_POSIX) {
2743 		if (fl->c.flc_flags & FL_ACCESS)
2744 			seq_puts(f, "ACCESS");
2745 		else if (fl->c.flc_flags & FL_OFDLCK)
2746 			seq_puts(f, "OFDLCK");
2747 		else
2748 			seq_puts(f, "POSIX ");
2749 
2750 		seq_printf(f, " %s ",
2751 			     (inode == NULL) ? "*NOINODE*" : "ADVISORY ");
2752 	} else if (fl->c.flc_flags & FL_FLOCK) {
2753 		seq_puts(f, "FLOCK  ADVISORY  ");
2754 	} else if (fl->c.flc_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT)) {
2755 		type = target_leasetype(fl);
2756 
2757 		if (fl->c.flc_flags & FL_DELEG)
2758 			seq_puts(f, "DELEG  ");
2759 		else
2760 			seq_puts(f, "LEASE  ");
2761 
2762 		if (lease_breaking(fl))
2763 			seq_puts(f, "BREAKING  ");
2764 		else if (fl->c.flc_file)
2765 			seq_puts(f, "ACTIVE    ");
2766 		else
2767 			seq_puts(f, "BREAKER   ");
2768 	} else {
2769 		seq_puts(f, "UNKNOWN UNKNOWN  ");
2770 	}
2771 
2772 	seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
2773 			     (type == F_RDLCK) ? "READ" : "UNLCK");
2774 	if (inode) {
2775 		/* userspace relies on this representation of dev_t */
2776 		seq_printf(f, "%d %02x:%02x:%lu ", pid,
2777 				MAJOR(inode->i_sb->s_dev),
2778 				MINOR(inode->i_sb->s_dev), inode->i_ino);
2779 	} else {
2780 		seq_printf(f, "%d <none>:0 ", pid);
2781 	}
2782 	if (fl->c.flc_flags & FL_POSIX) {
2783 		if (fl->fl_end == OFFSET_MAX)
2784 			seq_printf(f, "%Ld EOF\n", fl->fl_start);
2785 		else
2786 			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2787 	} else {
2788 		seq_puts(f, "0 EOF\n");
2789 	}
2790 }
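
/*
 * Example /proc/locks lines produced by the above (illustrative values):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:02:131078 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 5678 08:02:173916 0 EOF
 *	3: LEASE  ACTIVE    READ 910 08:02:131079 0 EOF
 */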
2791 
2792 static struct file_lock *get_next_blocked_member(struct file_lock *node)
2793 {
2794 	struct file_lock *tmp;
2795 
2796 	/* NULL node or root node */
2797 	if (node == NULL || node->c.flc_blocker == NULL)
2798 		return NULL;
2799 
	/* The next member in the linked list could be the node itself */
2801 	tmp = list_next_entry(node, c.flc_blocked_member);
2802 	if (list_entry_is_head(tmp, &node->c.flc_blocker->flc_blocked_requests,
2803 			       c.flc_blocked_member)
2804 		|| tmp == node) {
2805 		return NULL;
2806 	}
2807 
2808 	return tmp;
2809 }
2810 
2811 static int locks_show(struct seq_file *f, void *v)
2812 {
2813 	struct locks_iterator *iter = f->private;
2814 	struct file_lock *cur, *tmp;
2815 	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
2816 	int level = 0;
2817 
2818 	cur = hlist_entry(v, struct file_lock, c.flc_link);
2819 
2820 	if (locks_translate_pid(cur, proc_pidns) == 0)
2821 		return 0;
2822 
	/*
	 * View this cross-linked list as a binary tree: the first member of
	 * flc_blocked_requests is the left child of the current node, the
	 * next sibling in flc_blocked_member is the right child, and the
	 * parent of the current node can be reached through flc_blocker.
	 * The problem then becomes an ordinary binary-tree traversal.
	 */
2828 	while (cur != NULL) {
2829 		if (level)
2830 			lock_get_status(f, cur, iter->li_pos, "-> ", level);
2831 		else
2832 			lock_get_status(f, cur, iter->li_pos, "", level);
2833 
2834 		if (!list_empty(&cur->c.flc_blocked_requests)) {
2835 			/* Turn left */
2836 			cur = list_first_entry_or_null(&cur->c.flc_blocked_requests,
2837 						       struct file_lock,
2838 						       c.flc_blocked_member);
2839 			level++;
2840 		} else {
2841 			/* Turn right */
2842 			tmp = get_next_blocked_member(cur);
2843 			/* Fall back to parent node */
2844 			while (tmp == NULL && cur->c.flc_blocker != NULL) {
2845 				cur = file_lock(cur->c.flc_blocker);
2846 				level--;
2847 				tmp = get_next_blocked_member(cur);
2848 			}
2849 			cur = tmp;
2850 		}
2851 	}
2852 
2853 	return 0;
2854 }
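
/*
 * Illustrative output shape of the traversal above (assumption: applied
 * lock A blocks waiters B and C, and B in turn blocks D; one more column
 * of indentation per level):
 *
 *	6: POSIX  ADVISORY  WRITE ...		(A)
 *	6: -> POSIX  ADVISORY  WRITE ...	(B, level 1)
 *	6:  -> POSIX  ADVISORY  WRITE ...	(D, level 2)
 *	6: -> POSIX  ADVISORY  WRITE ...	(C, level 1)
 */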
2855 
2856 static void __show_fd_locks(struct seq_file *f,
2857 			struct list_head *head, int *id,
2858 			struct file *filp, struct files_struct *files)
2859 {
2860 	struct file_lock *fl;
2861 
	list_for_each_entry(fl, head, c.flc_list) {
		if (filp != fl->c.flc_file)
2865 			continue;
2866 		if (fl->c.flc_owner != files &&
2867 		    fl->c.flc_owner != filp)
2868 			continue;
2869 
2870 		(*id)++;
2871 		seq_puts(f, "lock:\t");
2872 		lock_get_status(f, fl, *id, "", 0);
2873 	}
2874 }
2875 
2876 void show_fd_locks(struct seq_file *f,
2877 		  struct file *filp, struct files_struct *files)
2878 {
2879 	struct inode *inode = file_inode(filp);
2880 	struct file_lock_context *ctx;
2881 	int id = 0;
2882 
2883 	ctx = locks_inode_context(inode);
2884 	if (!ctx)
2885 		return;
2886 
2887 	spin_lock(&ctx->flc_lock);
2888 	__show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
2889 	__show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
2890 	__show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
2891 	spin_unlock(&ctx->flc_lock);
2892 }
2893 
2894 static void *locks_start(struct seq_file *f, loff_t *pos)
2895 	__acquires(&blocked_lock_lock)
2896 {
2897 	struct locks_iterator *iter = f->private;
2898 
2899 	iter->li_pos = *pos + 1;
2900 	percpu_down_write(&file_rwsem);
2901 	spin_lock(&blocked_lock_lock);
2902 	return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
2903 }
2904 
2905 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2906 {
2907 	struct locks_iterator *iter = f->private;
2908 
2909 	++iter->li_pos;
2910 	return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
2911 }
2912 
2913 static void locks_stop(struct seq_file *f, void *v)
2914 	__releases(&blocked_lock_lock)
2915 {
2916 	spin_unlock(&blocked_lock_lock);
2917 	percpu_up_write(&file_rwsem);
2918 }
2919 
2920 static const struct seq_operations locks_seq_operations = {
2921 	.start	= locks_start,
2922 	.next	= locks_next,
2923 	.stop	= locks_stop,
2924 	.show	= locks_show,
2925 };
2926 
2927 static int __init proc_locks_init(void)
2928 {
2929 	proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
2930 			sizeof(struct locks_iterator), NULL);
2931 	return 0;
2932 }
2933 fs_initcall(proc_locks_init);
2934 #endif
2935 
2936 static int __init filelock_init(void)
2937 {
2938 	int i;
2939 
2940 	flctx_cache = kmem_cache_create("file_lock_ctx",
2941 			sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
2942 
2943 	filelock_cache = kmem_cache_create("file_lock_cache",
2944 			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2945 
2946 	for_each_possible_cpu(i) {
2947 		struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);
2948 
2949 		spin_lock_init(&fll->lock);
2950 		INIT_HLIST_HEAD(&fll->hlist);
2951 	}
2952 
2953 	lease_notifier_chain_init();
2954 	return 0;
2955 }
2956 core_initcall(filelock_init);
2957