// SPDX-License-Identifier: GPL-2.0
/* Watch queue and general notification mechanism, built on pipes
 *
 * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 *
 * See Documentation/core-api/watch_queue.rst
 */

#define pr_fmt(fmt) "watchq: " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/sched/signal.h>
#include <linux/watch_queue.h>
#include <linux/pipe_fs_i.h>

MODULE_DESCRIPTION("Watch queue");
MODULE_AUTHOR("Red Hat, Inc.");

#define WATCH_QUEUE_NOTE_SIZE 128
#define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE)

/*
 * This must be called under the RCU read-lock, which makes
 * sure that the wqueue still exists. It can then take the lock,
 * and check that the wqueue hasn't been destroyed, which in
 * turn makes sure that the notification pipe still exists.
 */
static inline bool lock_wqueue(struct watch_queue *wqueue)
{
	spin_lock_bh(&wqueue->lock);
	if (unlikely(wqueue->defunct)) {
		spin_unlock_bh(&wqueue->lock);
		return false;
	}
	return true;
}

static inline void unlock_wqueue(struct watch_queue *wqueue)
{
	spin_unlock_bh(&wqueue->lock);
}

static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
					 struct pipe_buffer *buf)
{
	struct watch_queue *wqueue = (struct watch_queue *)buf->private;
	struct page *page;
	unsigned int bit;

	/* We need to work out which note within the page this refers to, but
	 * the note might have been maximum size, so merely ANDing the offset
	 * off doesn't work.  OTOH, the note must've been more than zero size.
	 */
	bit = buf->offset + buf->len;
	if ((bit & (WATCH_QUEUE_NOTE_SIZE - 1)) == 0)
		bit -= WATCH_QUEUE_NOTE_SIZE;
	bit /= WATCH_QUEUE_NOTE_SIZE;

	page = buf->page;
	bit += page->index;

	set_bit(bit, wqueue->notes_bitmap);
	generic_pipe_buf_release(pipe, buf);
}
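
/* Illustrative worked example (editorial note, assuming PAGE_SIZE == 4096 and
 * therefore 32 notes of 128 bytes per page): a buffer with offset 256 and
 * len 128 ends at byte 384.  That is a multiple of the note size, so the note
 * size is subtracted back off before dividing, giving slot 2 within the page;
 * page->index then turns that into the queue-wide bit number:
 *
 *	bit = 256 + 128;		// 384
 *	if ((bit & 127) == 0)
 *		bit -= 128;		// 256
 *	bit /= 128;			// slot 2 within this page
 *	bit += page->index;		// global note number in the bitmap
 */
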
// No try_steal function => no stealing
#define watch_queue_pipe_buf_try_steal NULL

/* New data written to a pipe may be appended to a buffer with this type. */
static const struct pipe_buf_operations watch_queue_pipe_buf_ops = {
	.release	= watch_queue_pipe_buf_release,
	.try_steal	= watch_queue_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};

/*
 * Post a notification to a watch queue.
 *
 * Must be called with the RCU lock for reading, and the
 * watch_queue lock held, which guarantees that the pipe
 * hasn't been released.
 */
static bool post_one_notification(struct watch_queue *wqueue,
				  struct watch_notification *n)
{
	void *p;
	struct pipe_inode_info *pipe = wqueue->pipe;
	struct pipe_buffer *buf;
	struct page *page;
	unsigned int head, tail, mask, note, offset, len;
	bool done = false;

	if (!pipe)
		return false;

	spin_lock_irq(&pipe->rd_wait.lock);

	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;
	if (pipe_full(head, tail, pipe->ring_size))
		goto lost;

	note = find_first_bit(wqueue->notes_bitmap, wqueue->nr_notes);
	if (note >= wqueue->nr_notes)
		goto lost;

	page = wqueue->notes[note / WATCH_QUEUE_NOTES_PER_PAGE];
	offset = note % WATCH_QUEUE_NOTES_PER_PAGE * WATCH_QUEUE_NOTE_SIZE;
	get_page(page);
	len = n->info & WATCH_INFO_LENGTH;
	p = kmap_atomic(page);
	memcpy(p + offset, n, len);
	kunmap_atomic(p);

	buf = &pipe->bufs[head & mask];
	buf->page = page;
	buf->private = (unsigned long)wqueue;
	buf->ops = &watch_queue_pipe_buf_ops;
	buf->offset = offset;
	buf->len = len;
	buf->flags = PIPE_BUF_FLAG_WHOLE;
	smp_store_release(&pipe->head, head + 1); /* vs pipe_read() */

	if (!test_and_clear_bit(note, wqueue->notes_bitmap)) {
		spin_unlock_irq(&pipe->rd_wait.lock);
		BUG();
	}
	wake_up_interruptible_sync_poll_locked(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	done = true;

out:
	spin_unlock_irq(&pipe->rd_wait.lock);
	if (done)
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	return done;

lost:
	buf = &pipe->bufs[(head - 1) & mask];
	buf->flags |= PIPE_BUF_FLAG_LOSS;
	goto out;
}

/*
 * Apply filter rules to a notification.
 */
static bool filter_watch_notification(const struct watch_filter *wf,
				      const struct watch_notification *n)
{
	const struct watch_type_filter *wt;
	unsigned int st_bits = sizeof(wt->subtype_filter[0]) * 8;
	unsigned int st_index = n->subtype / st_bits;
	unsigned int st_bit = 1U << (n->subtype % st_bits);
	int i;

	if (!test_bit(n->type, wf->type_filter))
		return false;

	for (i = 0; i < wf->nr_filters; i++) {
		wt = &wf->filters[i];
		if (n->type == wt->type &&
		    (wt->subtype_filter[st_index] & st_bit) &&
		    (n->info & wt->info_mask) == wt->info_filter)
			return true;
	}

	return false; /* If there is a filter, the default is to reject. */
}
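
/* Illustrative sketch (editorial note): the subtype_filter words are 32 bits
 * wide here, so a notification with subtype 5 is tested against bit 5 of
 * subtype_filter[0].  A filter entry such as
 *
 *	struct watch_type_filter wt = {
 *		.type			= WATCH_TYPE_META,
 *		.subtype_filter[0]	= 1 << WATCH_META_REMOVAL_NOTIFICATION,
 *	};
 *
 * passes only removal meta-notifications: with info_mask left at zero, the
 * (n->info & info_mask) == info_filter test matches any info value.
 */
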
/**
 * __post_watch_notification - Post an event notification
 * @wlist: The watch list to post the event to.
 * @n: The notification record to post.
 * @cred: The creds of the process that triggered the notification.
 * @id: The ID to match on the watch.
 *
 * Post a notification of an event into a set of watch queues and let the users
 * know.
 *
 * The size of the notification should be set in n->info & WATCH_INFO_LENGTH
 * and is given in bytes (as encoded by watch_sizeof()).
 */
void __post_watch_notification(struct watch_list *wlist,
			       struct watch_notification *n,
			       const struct cred *cred,
			       u64 id)
{
	const struct watch_filter *wf;
	struct watch_queue *wqueue;
	struct watch *watch;

	if (((n->info & WATCH_INFO_LENGTH) >> WATCH_INFO_LENGTH__SHIFT) == 0) {
		WARN_ON(1);
		return;
	}

	rcu_read_lock();

	hlist_for_each_entry_rcu(watch, &wlist->watchers, list_node) {
		if (watch->id != id)
			continue;
		n->info &= ~WATCH_INFO_ID;
		n->info |= watch->info_id;

		wqueue = rcu_dereference(watch->queue);
		wf = rcu_dereference(wqueue->filter);
		if (wf && !filter_watch_notification(wf, n))
			continue;

		if (security_post_notification(watch->cred, cred, n) < 0)
			continue;

		if (lock_wqueue(wqueue)) {
			post_one_notification(wqueue, n);
			unlock_wqueue(wqueue);
		}
	}

	rcu_read_unlock();
}
EXPORT_SYMBOL(__post_watch_notification);

/*
 * Allocate sufficient pages to preallocate buffers for the requested number
 * of notifications.
 */
long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
{
	struct watch_queue *wqueue = pipe->watch_queue;
	struct page **pages;
	unsigned long *bitmap;
	unsigned long user_bufs;
	int ret, i, nr_pages;

	if (!wqueue)
		return -ENODEV;
	if (wqueue->notes)
		return -EBUSY;

	if (nr_notes < 1 ||
	    nr_notes > 512) /* TODO: choose a better hard limit */
		return -EINVAL;

	nr_pages = (nr_notes + WATCH_QUEUE_NOTES_PER_PAGE - 1);
	nr_pages /= WATCH_QUEUE_NOTES_PER_PAGE;
	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_pages);

	if (nr_pages > pipe->max_usage &&
	    (too_many_pipe_buffers_hard(user_bufs) ||
	     too_many_pipe_buffers_soft(user_bufs)) &&
	    pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto error;
	}

	nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE;
	ret = pipe_resize_ring(pipe, roundup_pow_of_two(nr_notes));
	if (ret < 0)
		goto error;

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto error;

	for (i = 0; i < nr_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto error_p;
		pages[i]->index = i * WATCH_QUEUE_NOTES_PER_PAGE;
	}

	bitmap = bitmap_alloc(nr_notes, GFP_KERNEL);
	if (!bitmap)
		goto error_p;

	bitmap_fill(bitmap, nr_notes);
	wqueue->notes = pages;
	wqueue->notes_bitmap = bitmap;
	wqueue->nr_pages = nr_pages;
	wqueue->nr_notes = nr_notes;
	return 0;

error_p:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);
error:
	(void) account_pipe_buffers(pipe->user, nr_pages, pipe->nr_accounted);
	return ret;
}
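
/* Illustrative worked example (editorial note, assuming PAGE_SIZE == 4096 and
 * therefore 32 notes per page): a request for 100 notes gives
 * nr_pages = (100 + 31) / 32 = 4, the note count is then rounded up to the
 * full 4 * 32 = 128 slots, and the pipe ring is resized to
 * roundup_pow_of_two(128) == 128 buffers so that every preallocated note can
 * be queued at once.
 */
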
/*
 * Set the filter on a watch queue.
 */
long watch_queue_set_filter(struct pipe_inode_info *pipe,
			    struct watch_notification_filter __user *_filter)
{
	struct watch_notification_type_filter *tf;
	struct watch_notification_filter filter;
	struct watch_type_filter *q;
	struct watch_filter *wfilter;
	struct watch_queue *wqueue = pipe->watch_queue;
	int ret, nr_filter = 0, i;

	if (!wqueue)
		return -ENODEV;

	if (!_filter) {
		/* Remove the old filter */
		wfilter = NULL;
		goto set;
	}

	/* Grab the user's filter specification */
	if (copy_from_user(&filter, _filter, sizeof(filter)) != 0)
		return -EFAULT;
	if (filter.nr_filters == 0 ||
	    filter.nr_filters > 16 ||
	    filter.__reserved != 0)
		return -EINVAL;

	tf = memdup_user(_filter->filters, filter.nr_filters * sizeof(*tf));
	if (IS_ERR(tf))
		return PTR_ERR(tf);

	ret = -EINVAL;
	for (i = 0; i < filter.nr_filters; i++) {
		if ((tf[i].info_filter & ~tf[i].info_mask) ||
		    tf[i].info_mask & WATCH_INFO_LENGTH)
			goto err_filter;
		/* Ignore any unknown types */
		if (tf[i].type >= WATCH_TYPE__NR)
			continue;
		nr_filter++;
	}

	/* Now we need to build the internal filter from only the relevant
	 * user-specified filters.
	 */
	ret = -ENOMEM;
	wfilter = kzalloc(struct_size(wfilter, filters, nr_filter), GFP_KERNEL);
	if (!wfilter)
		goto err_filter;
	wfilter->nr_filters = nr_filter;

	q = wfilter->filters;
	for (i = 0; i < filter.nr_filters; i++) {
		if (tf[i].type >= WATCH_TYPE__NR)
			continue;

		q->type			= tf[i].type;
		q->info_filter		= tf[i].info_filter;
		q->info_mask		= tf[i].info_mask;
		q->subtype_filter[0]	= tf[i].subtype_filter[0];
		__set_bit(q->type, wfilter->type_filter);
		q++;
	}

	kfree(tf);
set:
	pipe_lock(pipe);
	wfilter = rcu_replace_pointer(wqueue->filter, wfilter,
				      lockdep_is_held(&pipe->mutex));
	pipe_unlock(pipe);
	if (wfilter)
		kfree_rcu(wfilter, rcu);
	return 0;

err_filter:
	kfree(tf);
	return ret;
}
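
/* Illustrative userspace sketch (editorial note): watch_queue_set_filter() is
 * reached via the IOC_WATCH_QUEUE_SET_FILTER ioctl on a notification pipe, as
 * described in Documentation/core-api/watch_queue.rst.  Using the uapi types
 * from <linux/watch_queue.h>, a caller might attach a single-type filter
 * roughly like this (error handling omitted):
 *
 *	struct watch_notification_filter *f;
 *
 *	f = calloc(1, sizeof(*f) + sizeof(f->filters[0]));
 *	f->nr_filters = 1;
 *	f->filters[0].type = WATCH_TYPE_META;
 *	f->filters[0].subtype_filter[0] = UINT_MAX;
 *	ioctl(pipefd[0], IOC_WATCH_QUEUE_SET_FILTER, f);
 *
 * Passing a NULL argument removes any installed filter, per the !_filter
 * branch above.
 */
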
static void __put_watch_queue(struct kref *kref)
{
	struct watch_queue *wqueue =
		container_of(kref, struct watch_queue, usage);
	struct watch_filter *wfilter;
	int i;

	for (i = 0; i < wqueue->nr_pages; i++)
		__free_page(wqueue->notes[i]);
	kfree(wqueue->notes);
	bitmap_free(wqueue->notes_bitmap);

	wfilter = rcu_access_pointer(wqueue->filter);
	if (wfilter)
		kfree_rcu(wfilter, rcu);
	kfree_rcu(wqueue, rcu);
}

/**
 * put_watch_queue - Dispose of a ref on a watchqueue.
 * @wqueue: The watch queue to unref.
 */
void put_watch_queue(struct watch_queue *wqueue)
{
	kref_put(&wqueue->usage, __put_watch_queue);
}
EXPORT_SYMBOL(put_watch_queue);

static void free_watch(struct rcu_head *rcu)
{
	struct watch *watch = container_of(rcu, struct watch, rcu);

	put_watch_queue(rcu_access_pointer(watch->queue));
	atomic_dec(&watch->cred->user->nr_watches);
	put_cred(watch->cred);
	kfree(watch);
}

static void __put_watch(struct kref *kref)
{
	struct watch *watch = container_of(kref, struct watch, usage);

	call_rcu(&watch->rcu, free_watch);
}

/*
 * Discard a watch.
 */
static void put_watch(struct watch *watch)
{
	kref_put(&watch->usage, __put_watch);
}

/**
 * init_watch - Initialise a watch
 * @watch: The watch to initialise.
 * @wqueue: The queue to assign.
 *
 * Initialise a watch and set the watch queue.
 */
void init_watch(struct watch *watch, struct watch_queue *wqueue)
{
	kref_init(&watch->usage);
	INIT_HLIST_NODE(&watch->list_node);
	INIT_HLIST_NODE(&watch->queue_node);
	rcu_assign_pointer(watch->queue, wqueue);
}

static int add_one_watch(struct watch *watch, struct watch_list *wlist,
			 struct watch_queue *wqueue)
{
	const struct cred *cred;
	struct watch *w;

	hlist_for_each_entry(w, &wlist->watchers, list_node) {
		struct watch_queue *wq = rcu_access_pointer(w->queue);
		if (wqueue == wq && watch->id == w->id)
			return -EBUSY;
	}

	cred = current_cred();
	if (atomic_inc_return(&cred->user->nr_watches) > task_rlimit(current, RLIMIT_NOFILE)) {
		atomic_dec(&cred->user->nr_watches);
		return -EAGAIN;
	}

	watch->cred = get_cred(cred);
	rcu_assign_pointer(watch->watch_list, wlist);

	kref_get(&wqueue->usage);
	kref_get(&watch->usage);
	hlist_add_head(&watch->queue_node, &wqueue->watches);
	hlist_add_head_rcu(&watch->list_node, &wlist->watchers);
	return 0;
}

/**
 * add_watch_to_object - Add a watch on an object to a watch list
 * @watch: The watch to add
 * @wlist: The watch list to add to
 *
 * @watch->queue must have been set to point to the queue to post notifications
 * to and the watch list of the object to be watched.  @watch->cred must also
 * have been set to the appropriate credentials and a ref taken on them.
 *
 * The caller must pin the queue and the list both and must hold the list
 * locked against racing watch additions/removals.
 */
int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
{
	struct watch_queue *wqueue;
	int ret = -ENOENT;

	rcu_read_lock();

	wqueue = rcu_access_pointer(watch->queue);
	if (lock_wqueue(wqueue)) {
		spin_lock(&wlist->lock);
		ret = add_one_watch(watch, wlist, wqueue);
		spin_unlock(&wlist->lock);
		unlock_wqueue(wqueue);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(add_watch_to_object);
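
/* Illustrative kernel-side sketch (editorial note): a subsystem that wants to
 * offer watches typically embeds a watch_list in the watched object and then,
 * with the object serialised as add_watch_to_object() requires, hooks up a
 * watch roughly like this (error handling elided; the obj->watchers field is
 * hypothetical):
 *
 *	struct watch *watch = kzalloc(sizeof(*watch), GFP_KERNEL);
 *
 *	init_watch(watch, wqueue);
 *	watch->id = obj->id;
 *	watch->info_id = (u32)obj->id << WATCH_INFO_ID__SHIFT;
 *	err = add_watch_to_object(watch, &obj->watchers);
 *
 * keyctl_watch_key() in security/keys/keyctl.c follows this pattern for key
 * watches.
 */
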
/**
 * remove_watch_from_object - Remove a watch or all watches from an object.
 * @wlist: The watch list to remove from
 * @wq: The watch queue of interest (ignored if @all is true)
 * @id: The ID of the watch to remove (ignored if @all is true)
 * @all: True to remove all watches
 *
 * Remove a specific watch or all watches from an object. A notification is
 * sent to the watcher to tell them that this happened.
 */
int remove_watch_from_object(struct watch_list *wlist, struct watch_queue *wq,
			     u64 id, bool all)
{
	struct watch_notification_removal n;
	struct watch_queue *wqueue;
	struct watch *watch;
	int ret = -EBADSLT;

	rcu_read_lock();

again:
	spin_lock(&wlist->lock);
	hlist_for_each_entry(watch, &wlist->watchers, list_node) {
		if (all ||
		    (watch->id == id && rcu_access_pointer(watch->queue) == wq))
			goto found;
	}
	spin_unlock(&wlist->lock);
	goto out;

found:
	ret = 0;
	hlist_del_init_rcu(&watch->list_node);
	rcu_assign_pointer(watch->watch_list, NULL);
	spin_unlock(&wlist->lock);

	/* We now own the reference on watch that used to belong to wlist. */

	n.watch.type = WATCH_TYPE_META;
	n.watch.subtype = WATCH_META_REMOVAL_NOTIFICATION;
	n.watch.info = watch->info_id | watch_sizeof(n.watch);
	n.id = id;
	if (id != 0)
		n.watch.info = watch->info_id | watch_sizeof(n);

	wqueue = rcu_dereference(watch->queue);

	if (lock_wqueue(wqueue)) {
		post_one_notification(wqueue, &n.watch);

		if (!hlist_unhashed(&watch->queue_node)) {
			hlist_del_init_rcu(&watch->queue_node);
			put_watch(watch);
		}

		unlock_wqueue(wqueue);
	}

	if (wlist->release_watch) {
		void (*release_watch)(struct watch *);

		release_watch = wlist->release_watch;
		rcu_read_unlock();
		(*release_watch)(watch);
		rcu_read_lock();
	}
	put_watch(watch);

	if (all && !hlist_empty(&wlist->watchers))
		goto again;
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(remove_watch_from_object);

/*
 * Remove all the watches that are contributory to a queue. This has the
 * potential to race with removal of the watches by the destruction of the
 * objects being watched or with the distribution of notifications.
 */
void watch_queue_clear(struct watch_queue *wqueue)
{
	struct watch_list *wlist;
	struct watch *watch;
	bool release;

	rcu_read_lock();
	spin_lock_bh(&wqueue->lock);

	/* Prevent new notifications from being stored. */
	wqueue->defunct = true;

	while (!hlist_empty(&wqueue->watches)) {
		watch = hlist_entry(wqueue->watches.first, struct watch, queue_node);
		hlist_del_init_rcu(&watch->queue_node);
		/* We now own a ref on the watch. */
		spin_unlock_bh(&wqueue->lock);

		/* We can't do the next bit under the queue lock as we need to
		 * get the list lock - which would cause a deadlock if someone
		 * was removing from the opposite direction at the same time or
		 * posting a notification.
		 */
		wlist = rcu_dereference(watch->watch_list);
		if (wlist) {
			void (*release_watch)(struct watch *);

			spin_lock(&wlist->lock);

			release = !hlist_unhashed(&watch->list_node);
			if (release) {
				hlist_del_init_rcu(&watch->list_node);
				rcu_assign_pointer(watch->watch_list, NULL);

				/* We now own a second ref on the watch. */
			}

			release_watch = wlist->release_watch;
			spin_unlock(&wlist->lock);

			if (release) {
				if (release_watch) {
					rcu_read_unlock();
					/* This might need to call dput(), so
					 * we have to drop all the locks.
					 */
					(*release_watch)(watch);
					rcu_read_lock();
				}
				put_watch(watch);
			}
		}

		put_watch(watch);
		spin_lock_bh(&wqueue->lock);
	}

	spin_unlock_bh(&wqueue->lock);
	rcu_read_unlock();
}
/**
 * get_watch_queue - Get a watch queue from its file descriptor.
 * @fd: The fd to query.
 */
struct watch_queue *get_watch_queue(int fd)
{
	struct pipe_inode_info *pipe;
	struct watch_queue *wqueue = ERR_PTR(-EINVAL);
	struct fd f;

	f = fdget(fd);
	if (f.file) {
		pipe = get_pipe_info(f.file, false);
		if (pipe && pipe->watch_queue) {
			wqueue = pipe->watch_queue;
			kref_get(&wqueue->usage);
		}
		fdput(f);
	}

	return wqueue;
}
EXPORT_SYMBOL(get_watch_queue);

/*
 * Initialise a watch queue
 */
int watch_queue_init(struct pipe_inode_info *pipe)
{
	struct watch_queue *wqueue;

	wqueue = kzalloc(sizeof(*wqueue), GFP_KERNEL);
	if (!wqueue)
		return -ENOMEM;

	wqueue->pipe = pipe;
	kref_init(&wqueue->usage);
	spin_lock_init(&wqueue->lock);
	INIT_HLIST_HEAD(&wqueue->watches);

	pipe->watch_queue = wqueue;
	return 0;
}
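
/* Illustrative userspace sketch (editorial note): the whole mechanism is
 * driven from a pipe opened with O_NOTIFICATION_PIPE, sized with
 * IOC_WATCH_QUEUE_SET_SIZE and then read like an ordinary pipe, as described
 * in Documentation/core-api/watch_queue.rst.  Roughly (error handling
 * omitted):
 *
 *	int fds[2];
 *
 *	pipe2(fds, O_NOTIFICATION_PIPE);
 *	ioctl(fds[0], IOC_WATCH_QUEUE_SET_SIZE, 256);
 *	// register fds[0] with a source, e.g. KEYCTL_WATCH_KEY, then:
 *	for (;;) {
 *		char buf[4096];
 *		ssize_t len = read(fds[0], buf, sizeof(buf));
 *		// walk the watch_notification records packed into buf,
 *		// stepping by the length encoded in n->info & WATCH_INFO_LENGTH
 *	}
 */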