// SPDX-License-Identifier: GPL-2.0
/* Watch queue and general notification mechanism, built on pipes
 *
 * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * See Documentation/watch_queue.rst
 */

#define pr_fmt(fmt) "watchq: " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/sched/signal.h>
#include <linux/watch_queue.h>
#include <linux/pipe_fs_i.h>

MODULE_DESCRIPTION("Watch queue");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");

#define WATCH_QUEUE_NOTE_SIZE 128
#define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE)
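
/* With 4 KiB pages, that is 32 note slots of 128 bytes each per page (a
 * worked example; PAGE_SIZE varies by architecture).
 */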

static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
					 struct pipe_buffer *buf)
{
	struct watch_queue *wqueue = (struct watch_queue *)buf->private;
	struct page *page;
	unsigned int bit;

	/* We need to work out which note within the page this refers to, but
	 * the note might have been maximum size, so merely ANDing the offset
	 * off doesn't work.  OTOH, the note must've been more than zero size.
	 */
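	/* Worked example with the 128-byte note size above: a note at offset
	 * 384 with len 128 sums to 512, a multiple of 128, so it is rounded
	 * back to 384 before dividing down to note 3; a note at offset 384
	 * with len 40 sums to 424, which divides straight down to note 3.
	 */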
	bit = buf->offset + buf->len;
	if ((bit & (WATCH_QUEUE_NOTE_SIZE - 1)) == 0)
		bit -= WATCH_QUEUE_NOTE_SIZE;
	bit /= WATCH_QUEUE_NOTE_SIZE;

	page = buf->page;
	bit += page->index;

	set_bit(bit, wqueue->notes_bitmap);
}

static int watch_queue_pipe_buf_steal(struct pipe_inode_info *pipe,
				      struct pipe_buffer *buf)
{
	return -1; /* No. */
}
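
/* Note pages are preallocated by watch_queue_set_size() and recycled via the
 * notes bitmap, so page-stealing (e.g. by splice) is refused outright.
 */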

/* New data written to a pipe may be appended to a buffer with this type. */
static const struct pipe_buf_operations watch_queue_pipe_buf_ops = {
	.confirm	= generic_pipe_buf_confirm,
	.release	= watch_queue_pipe_buf_release,
	.steal		= watch_queue_pipe_buf_steal,
	.get		= generic_pipe_buf_get,
};

/*
 * Post a notification to a watch queue.
 */
static bool post_one_notification(struct watch_queue *wqueue,
				  struct watch_notification *n)
{
	void *p;
	struct pipe_inode_info *pipe = wqueue->pipe;
	struct pipe_buffer *buf;
	struct page *page;
	unsigned int head, tail, mask, note, offset, len;
	bool done = false;

	if (!pipe)
		return false;

	spin_lock_irq(&pipe->rd_wait.lock);

	if (wqueue->defunct)
		goto out;

	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;
	if (pipe_full(head, tail, pipe->ring_size))
		goto lost;

	note = find_first_bit(wqueue->notes_bitmap, wqueue->nr_notes);
	if (note >= wqueue->nr_notes)
		goto lost;

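	/* Map the note number to a (page, offset) pair; e.g. with 4 KiB pages
	 * (32 notes per page), note 35 lands in page 1 at offset 3 * 128 =
	 * 384 (a worked example, not from this file).
	 */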
	page = wqueue->notes[note / WATCH_QUEUE_NOTES_PER_PAGE];
	offset = (note % WATCH_QUEUE_NOTES_PER_PAGE) * WATCH_QUEUE_NOTE_SIZE;
	get_page(page);
	len = n->info & WATCH_INFO_LENGTH;
	p = kmap_atomic(page);
	memcpy(p + offset, n, len);
	kunmap_atomic(p);

	buf = &pipe->bufs[head & mask];
	buf->page = page;
	buf->private = (unsigned long)wqueue;
	buf->ops = &watch_queue_pipe_buf_ops;
	buf->offset = offset;
	buf->len = len;
	buf->flags = PIPE_BUF_FLAG_WHOLE;
	pipe->head = head + 1;

	if (!test_and_clear_bit(note, wqueue->notes_bitmap)) {
		spin_unlock_irq(&pipe->rd_wait.lock);
		BUG();
	}
	wake_up_interruptible_sync_poll_locked(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	done = true;

out:
	spin_unlock_irq(&pipe->rd_wait.lock);
	if (done)
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	return done;

lost:
	buf = &pipe->bufs[(head - 1) & mask];
	buf->flags |= PIPE_BUF_FLAG_LOSS;
	goto out;
}

/*
 * Apply filter rules to a notification.
 */
static bool filter_watch_notification(const struct watch_filter *wf,
				      const struct watch_notification *n)
{
	const struct watch_type_filter *wt;
	unsigned int st_bits = sizeof(wt->subtype_filter[0]) * 8;
	unsigned int st_index = n->subtype / st_bits;
	unsigned int st_bit = 1U << (n->subtype % st_bits);
	int i;
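	/* Worked example, assuming 32-bit subtype_filter words: subtype 5
	 * selects word 0, bit 5.  (watch_queue_set_filter() below only fills
	 * in subtype_filter[0], i.e. subtypes 0-31.)
	 */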

	if (!test_bit(n->type, wf->type_filter))
		return false;

	for (i = 0; i < wf->nr_filters; i++) {
		wt = &wf->filters[i];
		if (n->type == wt->type &&
		    (wt->subtype_filter[st_index] & st_bit) &&
		    (n->info & wt->info_mask) == wt->info_filter)
			return true;
	}

	return false; /* If there is a filter, the default is to reject. */
}

/**
 * __post_watch_notification - Post an event notification
 * @wlist: The watch list to post the event to.
 * @n: The notification record to post.
 * @cred: The creds of the process that triggered the notification.
 * @id: The ID to match on the watch.
 *
 * Post a notification of an event into a set of watch queues and let the users
 * know.
 *
 * The size of the notification should be set in n->info & WATCH_INFO_LENGTH and
 * should be in units of sizeof(*n).
 */
void __post_watch_notification(struct watch_list *wlist,
			       struct watch_notification *n,
			       const struct cred *cred,
			       u64 id)
{
	const struct watch_filter *wf;
	struct watch_queue *wqueue;
	struct watch *watch;

	if (((n->info & WATCH_INFO_LENGTH) >> WATCH_INFO_LENGTH__SHIFT) == 0) {
		WARN_ON(1);
		return;
	}

	rcu_read_lock();

	hlist_for_each_entry_rcu(watch, &wlist->watchers, list_node) {
		if (watch->id != id)
			continue;
		n->info &= ~WATCH_INFO_ID;
		n->info |= watch->info_id;

		wqueue = rcu_dereference(watch->queue);
		wf = rcu_dereference(wqueue->filter);
		if (wf && !filter_watch_notification(wf, n))
			continue;

		if (security_post_notification(watch->cred, cred, n) < 0)
			continue;

		post_one_notification(wqueue, n);
	}

	rcu_read_unlock();
}
EXPORT_SYMBOL(__post_watch_notification);
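
/* A sketch of how a subsystem might post a notification through the above
 * (names modelled on this file's own usage; not a verbatim caller):
 *
 *	struct watch_notification n = {
 *		.type		= WATCH_TYPE_META,
 *		.subtype	= WATCH_META_REMOVAL_NOTIFICATION,
 *		.info		= watch_sizeof(n),
 *	};
 *
 *	__post_watch_notification(wlist, &n, current_cred(), watch_id);
 */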

/*
 * Preallocate sufficient pages to hold the requested number of notifications.
 */
long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
{
	struct watch_queue *wqueue = pipe->watch_queue;
	struct page **pages;
	unsigned long *bitmap;
	unsigned long user_bufs;
	unsigned int bmsize;
	int ret, i, nr_pages;

	if (!wqueue)
		return -ENODEV;
	if (wqueue->notes)
		return -EBUSY;

	if (nr_notes < 1 ||
	    nr_notes > 512) /* TODO: choose a better hard limit */
		return -EINVAL;

	nr_pages = (nr_notes + WATCH_QUEUE_NOTES_PER_PAGE - 1);
	nr_pages /= WATCH_QUEUE_NOTES_PER_PAGE;
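	/* E.g. with 4 KiB pages (32 notes per page), nr_notes = 100 rounds up
	 * to nr_pages = 4; nr_notes is then rounded up to 128 below to match.
	 */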
	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_pages);

	if (nr_pages > pipe->max_usage &&
	    (too_many_pipe_buffers_hard(user_bufs) ||
	     too_many_pipe_buffers_soft(user_bufs)) &&
	    pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto error;
	}

	ret = pipe_resize_ring(pipe, nr_notes);
	if (ret < 0)
		goto error;

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto error;

	for (i = 0; i < nr_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto error_p;
		pages[i]->index = i * WATCH_QUEUE_NOTES_PER_PAGE;
	}

	bmsize = (nr_notes + BITS_PER_LONG - 1) / BITS_PER_LONG;
	bmsize *= sizeof(unsigned long);
	bitmap = kmalloc(bmsize, GFP_KERNEL);
	if (!bitmap)
		goto error_p;

	memset(bitmap, 0xff, bmsize);
	wqueue->notes = pages;
	wqueue->notes_bitmap = bitmap;
	wqueue->nr_pages = nr_pages;
	wqueue->nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE;
	return 0;

error_p:
	/* Free only the pages that were actually allocated */
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);
error:
	(void) account_pipe_buffers(pipe->user, nr_pages, pipe->nr_accounted);
	return ret;
}
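
/* A sketch of the userspace side of the above (error handling elided;
 * O_NOTIFICATION_PIPE and IOC_WATCH_QUEUE_SET_SIZE come from
 * include/uapi/linux/watch_queue.h):
 *
 *	int fds[2];
 *
 *	pipe2(fds, O_NOTIFICATION_PIPE);
 *	ioctl(fds[0], IOC_WATCH_QUEUE_SET_SIZE, 256);
 */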

/*
 * Set the filter on a watch queue.
 */
long watch_queue_set_filter(struct pipe_inode_info *pipe,
			    struct watch_notification_filter __user *_filter)
{
	struct watch_notification_type_filter *tf;
	struct watch_notification_filter filter;
	struct watch_type_filter *q;
	struct watch_filter *wfilter;
	struct watch_queue *wqueue = pipe->watch_queue;
	int ret, nr_filter = 0, i;

	if (!wqueue)
		return -ENODEV;

	if (!_filter) {
		/* Remove the old filter */
		wfilter = NULL;
		goto set;
	}

	/* Grab the user's filter specification */
	if (copy_from_user(&filter, _filter, sizeof(filter)) != 0)
		return -EFAULT;
	if (filter.nr_filters == 0 ||
	    filter.nr_filters > 16 ||
	    filter.__reserved != 0)
		return -EINVAL;

	tf = memdup_user(_filter->filters, filter.nr_filters * sizeof(*tf));
	if (IS_ERR(tf))
		return PTR_ERR(tf);

	ret = -EINVAL;
	for (i = 0; i < filter.nr_filters; i++) {
		if ((tf[i].info_filter & ~tf[i].info_mask) ||
		    tf[i].info_mask & WATCH_INFO_LENGTH)
			goto err_filter;
		/* Ignore any unknown types */
		if (tf[i].type >= sizeof(wfilter->type_filter) * 8)
			continue;
		nr_filter++;
	}

	/* Now we need to build the internal filter from only the relevant
	 * user-specified filters.
	 */
	ret = -ENOMEM;
	wfilter = kzalloc(struct_size(wfilter, filters, nr_filter), GFP_KERNEL);
	if (!wfilter)
		goto err_filter;
	wfilter->nr_filters = nr_filter;

	q = wfilter->filters;
	for (i = 0; i < filter.nr_filters; i++) {
		/* This must mirror the "unknown types" check above, else an
		 * unknown type could index past the end of type_filter.
		 */
		if (tf[i].type >= sizeof(wfilter->type_filter) * 8)
			continue;

		q->type			= tf[i].type;
		q->info_filter		= tf[i].info_filter;
		q->info_mask		= tf[i].info_mask;
		q->subtype_filter[0]	= tf[i].subtype_filter[0];
		__set_bit(q->type, wfilter->type_filter);
		q++;
	}

	kfree(tf);
set:
	pipe_lock(pipe);
	wfilter = rcu_replace_pointer(wqueue->filter, wfilter,
				      lockdep_is_held(&pipe->mutex));
	pipe_unlock(pipe);
	if (wfilter)
		kfree_rcu(wfilter, rcu);
	return 0;

err_filter:
	kfree(tf);
	return ret;
}
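
/* A sketch of the corresponding userspace call (structure layout from
 * include/uapi/linux/watch_queue.h; allocation checks elided):
 *
 *	struct watch_notification_filter *f;
 *	size_t size = sizeof(*f) + sizeof(struct watch_notification_type_filter);
 *
 *	f = calloc(1, size);
 *	f->nr_filters = 1;
 *	f->filters[0].type = WATCH_TYPE_KEY_NOTIFY;
 *	f->filters[0].subtype_filter[0] = ~0U;
 *	ioctl(fds[0], IOC_WATCH_QUEUE_SET_FILTER, f);
 */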

static void __put_watch_queue(struct kref *kref)
{
	struct watch_queue *wqueue =
		container_of(kref, struct watch_queue, usage);
	struct watch_filter *wfilter;
	int i;

	for (i = 0; i < wqueue->nr_pages; i++)
		__free_page(wqueue->notes[i]);

	wfilter = rcu_access_pointer(wqueue->filter);
	if (wfilter)
		kfree_rcu(wfilter, rcu);
	kfree_rcu(wqueue, rcu);
}

/**
 * put_watch_queue - Dispose of a ref on a watchqueue.
 * @wqueue: The watch queue to unref.
 */
void put_watch_queue(struct watch_queue *wqueue)
{
	kref_put(&wqueue->usage, __put_watch_queue);
}
EXPORT_SYMBOL(put_watch_queue);

static void free_watch(struct rcu_head *rcu)
{
	struct watch *watch = container_of(rcu, struct watch, rcu);

	put_watch_queue(rcu_access_pointer(watch->queue));
	put_cred(watch->cred);
}

static void __put_watch(struct kref *kref)
{
	struct watch *watch = container_of(kref, struct watch, usage);

	call_rcu(&watch->rcu, free_watch);
}

/*
 * Discard a watch.
 */
static void put_watch(struct watch *watch)
{
	kref_put(&watch->usage, __put_watch);
}

/**
 * init_watch - Initialise a watch
 * @watch: The watch to initialise.
 * @wqueue: The queue to assign.
 *
 * Initialise a watch and set the watch queue.
 */
void init_watch(struct watch *watch, struct watch_queue *wqueue)
{
	kref_init(&watch->usage);
	INIT_HLIST_NODE(&watch->list_node);
	INIT_HLIST_NODE(&watch->queue_node);
	rcu_assign_pointer(watch->queue, wqueue);
}

/**
 * add_watch_to_object - Add a watch on an object to a watch list
 * @watch: The watch to add
 * @wlist: The watch list to add to
 *
 * @watch->queue must have been set to point to the queue to post notifications
 * to and the watch list of the object to be watched.  @watch->cred is set by
 * this function to the current task's credentials, with a ref taken on them.
 *
 * The caller must pin the queue and the list both and must hold the list
 * locked against racing watch additions/removals.
 */
int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
{
	struct watch_queue *wqueue = rcu_access_pointer(watch->queue);
	struct watch *w;

	hlist_for_each_entry(w, &wlist->watchers, list_node) {
		struct watch_queue *wq = rcu_access_pointer(w->queue);
		if (wqueue == wq && watch->id == w->id)
			return -EBUSY;
	}

	watch->cred = get_current_cred();
	rcu_assign_pointer(watch->watch_list, wlist);

	spin_lock_bh(&wqueue->lock);
	kref_get(&wqueue->usage);
	kref_get(&watch->usage);
	hlist_add_head(&watch->queue_node, &wqueue->watches);
	spin_unlock_bh(&wqueue->lock);

	hlist_add_head(&watch->list_node, &wlist->watchers);
	return 0;
}
EXPORT_SYMBOL(add_watch_to_object);
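
/* A sketch of typical caller usage (modelled on the requirements stated
 * above; the object and its lock are hypothetical):
 *
 *	init_watch(watch, wqueue);
 *	watch->id	= id;
 *	watch->info_id	= (u32)id << WATCH_INFO_ID__SHIFT;
 *
 *	down_write(&object->watch_sem);
 *	ret = add_watch_to_object(watch, &object->watchers);
 *	up_write(&object->watch_sem);
 */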

/**
 * remove_watch_from_object - Remove a watch or all watches from an object.
 * @wlist: The watch list to remove from
 * @wq: The watch queue of interest (ignored if @all is true)
 * @id: The ID of the watch to remove (ignored if @all is true)
 * @all: True to remove all watches
 *
 * Remove a specific watch or all watches from an object.  A notification is
 * sent to the watcher to tell them that this happened.
 */
int remove_watch_from_object(struct watch_list *wlist, struct watch_queue *wq,
			     u64 id, bool all)
{
	struct watch_notification_removal n;
	struct watch_queue *wqueue;
	struct watch *watch;
	int ret = -EBADSLT;

	rcu_read_lock();

again:
	spin_lock(&wlist->lock);
	hlist_for_each_entry(watch, &wlist->watchers, list_node) {
		if (all ||
		    (watch->id == id && rcu_access_pointer(watch->queue) == wq))
			goto found;
	}
	spin_unlock(&wlist->lock);
	goto out;

found:
	ret = 0;
	hlist_del_init_rcu(&watch->list_node);
	rcu_assign_pointer(watch->watch_list, NULL);
	spin_unlock(&wlist->lock);

	/* We now own the reference on watch that used to belong to wlist. */

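	/* Build the removal notification: with a nonzero @id, the record sent
	 * is the full watch_notification_removal, which carries the 64-bit
	 * id; otherwise only the base watch_notification is included.
	 */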
	n.watch.type = WATCH_TYPE_META;
	n.watch.subtype = WATCH_META_REMOVAL_NOTIFICATION;
	n.watch.info = watch->info_id | watch_sizeof(n.watch);
	n.id = id;
	if (id != 0)
		n.watch.info = watch->info_id | watch_sizeof(n);

	wqueue = rcu_dereference(watch->queue);

	/* We don't need the watch list lock for the next bit as RCU is
	 * protecting *wqueue from deallocation.
	 */
	if (wqueue) {
		post_one_notification(wqueue, &n.watch);

		spin_lock_bh(&wqueue->lock);

		if (!hlist_unhashed(&watch->queue_node)) {
			hlist_del_init_rcu(&watch->queue_node);
			put_watch(watch);
		}

		spin_unlock_bh(&wqueue->lock);
	}

	if (wlist->release_watch) {
		void (*release_watch)(struct watch *);

		release_watch = wlist->release_watch;
		rcu_read_unlock();
		(*release_watch)(watch);
		rcu_read_lock();
	}
	put_watch(watch);

	if (all && !hlist_empty(&wlist->watchers))
		goto again;
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(remove_watch_from_object);

/*
 * Remove all the watches that are contributory to a queue.  This has the
 * potential to race with removal of the watches by the destruction of the
 * objects being watched or with the distribution of notifications.
 */
void watch_queue_clear(struct watch_queue *wqueue)
{
	struct watch_list *wlist;
	struct watch *watch;
	bool release;

	rcu_read_lock();
	spin_lock_bh(&wqueue->lock);

	/* Prevent new additions and prevent notifications from happening */
	wqueue->defunct = true;

	while (!hlist_empty(&wqueue->watches)) {
		watch = hlist_entry(wqueue->watches.first, struct watch, queue_node);
		hlist_del_init_rcu(&watch->queue_node);
		/* We now own a ref on the watch. */
		spin_unlock_bh(&wqueue->lock);

		/* We can't do the next bit under the queue lock as we need to
		 * get the list lock - which would cause a deadlock if someone
		 * was removing from the opposite direction at the same time or
		 * posting a notification.
		 */
		wlist = rcu_dereference(watch->watch_list);
		if (wlist) {
			void (*release_watch)(struct watch *);

			spin_lock(&wlist->lock);

			release = !hlist_unhashed(&watch->list_node);
			if (release) {
				hlist_del_init_rcu(&watch->list_node);
				rcu_assign_pointer(watch->watch_list, NULL);

				/* We now own a second ref on the watch. */
			}

			release_watch = wlist->release_watch;
			spin_unlock(&wlist->lock);

			if (release) {
				if (release_watch) {
					rcu_read_unlock();
					/* This might need to call dput(), so
					 * we have to drop all the locks.
					 */
					(*release_watch)(watch);
					rcu_read_lock();
				}
				put_watch(watch);
			}
		}

		put_watch(watch);
		spin_lock_bh(&wqueue->lock);
	}

	spin_unlock_bh(&wqueue->lock);
	rcu_read_unlock();
}

/**
 * get_watch_queue - Get a watch queue from its file descriptor.
 * @fd: The fd to query.
 */
struct watch_queue *get_watch_queue(int fd)
{
	struct pipe_inode_info *pipe;
	struct watch_queue *wqueue = ERR_PTR(-EINVAL);
	struct fd f;

	f = fdget(fd);
	if (f.file) {
		pipe = get_pipe_info(f.file, false);
		if (pipe && pipe->watch_queue) {
			wqueue = pipe->watch_queue;
			kref_get(&wqueue->usage);
		}
		fdput(f);
	}

	return wqueue;
}
EXPORT_SYMBOL(get_watch_queue);

/*
 * Initialise a watch queue
 */
int watch_queue_init(struct pipe_inode_info *pipe)
{
	struct watch_queue *wqueue;

	wqueue = kzalloc(sizeof(*wqueue), GFP_KERNEL);
	if (!wqueue)
		return -ENOMEM;

	wqueue->pipe = pipe;
	kref_init(&wqueue->usage);
	spin_lock_init(&wqueue->lock);
	INIT_HLIST_HEAD(&wqueue->watches);

	pipe->watch_queue = wqueue;
	return 0;
}