// SPDX-License-Identifier: GPL-2.0-only
/*
 * ntsync.c - Kernel driver for NT synchronization primitives
 *
 * Copyright (C) 2024 Elizabeth Figura <[email protected]>
 */

#include <linux/anon_inodes.h>
#include <linux/atomic.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <uapi/linux/ntsync.h>

#define NTSYNC_NAME	"ntsync"

enum ntsync_type {
	NTSYNC_TYPE_SEM,
	NTSYNC_TYPE_MUTEX,
	NTSYNC_TYPE_EVENT,
};

/*
 * Individual synchronization primitives are represented by
 * struct ntsync_obj, and each primitive is backed by a file.
 *
 * The whole namespace is represented by a struct ntsync_device also
 * backed by a file.
 *
 * Both rely on struct file for reference counting. Individual
 * ntsync_obj objects take a reference to the device when created.
 * Wait operations take a reference to each object being waited on for
 * the duration of the wait.
 */
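
/*
 * Illustrative user-space usage (a sketch only, not part of this driver;
 * it assumes the ioctl interface declared in <uapi/linux/ntsync.h> and a
 * libc that provides gettid()). A timeout of U64_MAX means "wait forever",
 * and "objs" points to an array of ntsync object file descriptors:
 *
 *	int dev = open("/dev/ntsync", O_RDWR | O_CLOEXEC);
 *
 *	struct ntsync_sem_args sem_args = { .count = 0, .max = 2 };
 *	int sem = ioctl(dev, NTSYNC_IOC_CREATE_SEM, &sem_args);
 *
 *	__u32 release_count = 1;
 *	ioctl(sem, NTSYNC_IOC_SEM_RELEASE, &release_count);
 *
 *	int objs[] = { sem };
 *	struct ntsync_wait_args wait_args = {
 *		.timeout = ~0ull,
 *		.objs = (uintptr_t)objs,
 *		.count = 1,
 *		.owner = gettid(),
 *	};
 *	ioctl(dev, NTSYNC_IOC_WAIT_ANY, &wait_args);
 */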

struct ntsync_obj {
	spinlock_t lock;
	int dev_locked;

	enum ntsync_type type;

	struct file *file;
	struct ntsync_device *dev;

	/* The following fields are protected by the object lock. */
	union {
		struct {
			__u32 count;
			__u32 max;
		} sem;
		struct {
			__u32 count;
			pid_t owner;
			bool ownerdead;
		} mutex;
		struct {
			bool manual;
			bool signaled;
		} event;
	} u;

	/*
	 * any_waiters is protected by the object lock, but all_waiters is
	 * protected by the device wait_all_lock.
	 */
	struct list_head any_waiters;
	struct list_head all_waiters;

	/*
	 * Hint describing how many tasks are queued on this object in a
	 * wait-all operation.
	 *
	 * Any time we do a wake, we may need to wake "all" waiters as well as
	 * "any" waiters. In order to atomically wake "all" waiters, we must
	 * lock all of the objects, and that means grabbing the wait_all_lock
	 * below (and, due to lock ordering rules, before locking this object).
	 * However, wait-all is a rare operation, and grabbing the wait-all
	 * lock for every wake would create unnecessary contention.
	 * Therefore we first check whether all_hint is zero, and, if it is,
	 * we skip trying to wake "all" waiters.
	 *
	 * Since wait requests must originate from user-space threads, we're
	 * limited here by PID_MAX_LIMIT, so there's no risk of overflow.
	 */
	atomic_t all_hint;
};

struct ntsync_q_entry {
	struct list_head node;
	struct ntsync_q *q;
	struct ntsync_obj *obj;
	__u32 index;
};

struct ntsync_q {
	struct task_struct *task;
	__u32 owner;

	/*
	 * Protected via atomic_try_cmpxchg(). Only the thread that wins the
	 * compare-and-swap may actually change object states and wake this
	 * task.
	 */
	atomic_t signaled;

	bool all;
	bool ownerdead;
	__u32 count;
	struct ntsync_q_entry entries[];
};

struct ntsync_device {
	/*
	 * Wait-all operations must atomically grab all objects, and be totally
	 * ordered with respect to each other and wait-any operations.
	 * If one thread is trying to acquire several objects, another thread
	 * cannot touch the object at the same time.
	 *
	 * This device-wide lock is used to serialize wait-for-all
	 * operations, and operations on an object that is involved in a
	 * wait-for-all.
	 */
	struct mutex wait_all_lock;

	struct file *file;
};

/*
 * Single objects are locked using obj->lock.
 *
 * Multiple objects are 'locked' while holding dev->wait_all_lock.
 * In this case however, individual objects are not locked by holding
 * obj->lock, but by setting obj->dev_locked.
 *
 * This means that in order to lock a single object, the sequence is slightly
 * more complicated than usual. Specifically it needs to check obj->dev_locked
 * after acquiring obj->lock; if set, it needs to drop the lock and acquire
 * dev->wait_all_lock in order to serialize against the multi-object operation.
 */

static void dev_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
{
	lockdep_assert_held(&dev->wait_all_lock);
	lockdep_assert(obj->dev == dev);
	spin_lock(&obj->lock);
	/*
	 * By setting obj->dev_locked inside obj->lock, it is ensured that
	 * anyone holding obj->lock must see the value.
	 */
	obj->dev_locked = 1;
	spin_unlock(&obj->lock);
}

static void dev_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
{
	lockdep_assert_held(&dev->wait_all_lock);
	lockdep_assert(obj->dev == dev);
	spin_lock(&obj->lock);
	obj->dev_locked = 0;
	spin_unlock(&obj->lock);
}

static void obj_lock(struct ntsync_obj *obj)
{
	struct ntsync_device *dev = obj->dev;

	for (;;) {
		spin_lock(&obj->lock);
		if (likely(!obj->dev_locked))
			break;

		spin_unlock(&obj->lock);
		mutex_lock(&dev->wait_all_lock);
		spin_lock(&obj->lock);
		/*
		 * obj->dev_locked should be set and released under the same
		 * wait_all_lock section; since we now own this lock, it should
		 * be clear.
		 */
		lockdep_assert(!obj->dev_locked);
		spin_unlock(&obj->lock);
		mutex_unlock(&dev->wait_all_lock);
	}
}

static void obj_unlock(struct ntsync_obj *obj)
{
	spin_unlock(&obj->lock);
}

static bool ntsync_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
{
	bool all;

	obj_lock(obj);
	all = atomic_read(&obj->all_hint);
	if (unlikely(all)) {
		obj_unlock(obj);
		mutex_lock(&dev->wait_all_lock);
		dev_lock_obj(dev, obj);
	}

	return all;
}

static void ntsync_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj, bool all)
{
	if (all) {
		dev_unlock_obj(dev, obj);
		mutex_unlock(&dev->wait_all_lock);
	} else {
		obj_unlock(obj);
	}
}
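
/*
 * A sketch of how the helpers above are meant to be paired (this is the
 * pattern used by the ioctl handlers below, not an additional API):
 *
 *	bool all;
 *
 *	all = ntsync_lock_obj(dev, obj);
 *	... read or modify obj->u.* and wake any waiters ...
 *	ntsync_unlock_obj(dev, obj, all);
 *
 * ntsync_lock_obj() returns true when the object might be involved in a
 * wait-all operation, in which case the object was locked via
 * dev->wait_all_lock and must be unlocked the same way.
 */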

#define ntsync_assert_held(obj) \
	lockdep_assert((lockdep_is_held(&(obj)->lock) != LOCK_STATE_NOT_HELD) || \
		       ((lockdep_is_held(&(obj)->dev->wait_all_lock) != LOCK_STATE_NOT_HELD) && \
			(obj)->dev_locked))

static bool is_signaled(struct ntsync_obj *obj, __u32 owner)
{
	ntsync_assert_held(obj);

	switch (obj->type) {
	case NTSYNC_TYPE_SEM:
		return !!obj->u.sem.count;
	case NTSYNC_TYPE_MUTEX:
		if (obj->u.mutex.owner && obj->u.mutex.owner != owner)
			return false;
		return obj->u.mutex.count < UINT_MAX;
	case NTSYNC_TYPE_EVENT:
		return obj->u.event.signaled;
	}

	WARN(1, "bad object type %#x\n", obj->type);
	return false;
}

/*
 * "locked_obj" is an optional pointer to an object which is already locked and
 * should not be locked again. This is necessary so that changing an object's
 * state and waking it can be a single atomic operation.
 */
static void try_wake_all(struct ntsync_device *dev, struct ntsync_q *q,
			 struct ntsync_obj *locked_obj)
{
	__u32 count = q->count;
	bool can_wake = true;
	int signaled = -1;
	__u32 i;

	lockdep_assert_held(&dev->wait_all_lock);
	if (locked_obj)
		lockdep_assert(locked_obj->dev_locked);

	for (i = 0; i < count; i++) {
		if (q->entries[i].obj != locked_obj)
			dev_lock_obj(dev, q->entries[i].obj);
	}

	for (i = 0; i < count; i++) {
		if (!is_signaled(q->entries[i].obj, q->owner)) {
			can_wake = false;
			break;
		}
	}

	if (can_wake && atomic_try_cmpxchg(&q->signaled, &signaled, 0)) {
		for (i = 0; i < count; i++) {
			struct ntsync_obj *obj = q->entries[i].obj;

			switch (obj->type) {
			case NTSYNC_TYPE_SEM:
				obj->u.sem.count--;
				break;
			case NTSYNC_TYPE_MUTEX:
				if (obj->u.mutex.ownerdead)
					q->ownerdead = true;
				obj->u.mutex.ownerdead = false;
				obj->u.mutex.count++;
				obj->u.mutex.owner = q->owner;
				break;
			case NTSYNC_TYPE_EVENT:
				if (!obj->u.event.manual)
					obj->u.event.signaled = false;
				break;
			}
		}
		wake_up_process(q->task);
	}

	for (i = 0; i < count; i++) {
		if (q->entries[i].obj != locked_obj)
			dev_unlock_obj(dev, q->entries[i].obj);
	}
}

static void try_wake_all_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
{
	struct ntsync_q_entry *entry;

	lockdep_assert_held(&dev->wait_all_lock);
	lockdep_assert(obj->dev_locked);

	list_for_each_entry(entry, &obj->all_waiters, node)
		try_wake_all(dev, entry->q, obj);
}

static void try_wake_any_sem(struct ntsync_obj *sem)
{
	struct ntsync_q_entry *entry;

	ntsync_assert_held(sem);
	lockdep_assert(sem->type == NTSYNC_TYPE_SEM);

	list_for_each_entry(entry, &sem->any_waiters, node) {
		struct ntsync_q *q = entry->q;
		int signaled = -1;

		if (!sem->u.sem.count)
			break;

		if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
			sem->u.sem.count--;
			wake_up_process(q->task);
		}
	}
}

static void try_wake_any_mutex(struct ntsync_obj *mutex)
{
	struct ntsync_q_entry *entry;

	ntsync_assert_held(mutex);
	lockdep_assert(mutex->type == NTSYNC_TYPE_MUTEX);

	list_for_each_entry(entry, &mutex->any_waiters, node) {
		struct ntsync_q *q = entry->q;
		int signaled = -1;

		if (mutex->u.mutex.count == UINT_MAX)
			break;
		if (mutex->u.mutex.owner && mutex->u.mutex.owner != q->owner)
			continue;

		if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
			if (mutex->u.mutex.ownerdead)
				q->ownerdead = true;
			mutex->u.mutex.ownerdead = false;
			mutex->u.mutex.count++;
			mutex->u.mutex.owner = q->owner;
			wake_up_process(q->task);
		}
	}
}

static void try_wake_any_event(struct ntsync_obj *event)
{
	struct ntsync_q_entry *entry;

	ntsync_assert_held(event);
	lockdep_assert(event->type == NTSYNC_TYPE_EVENT);

	list_for_each_entry(entry, &event->any_waiters, node) {
		struct ntsync_q *q = entry->q;
		int signaled = -1;

		if (!event->u.event.signaled)
			break;

		if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
			if (!event->u.event.manual)
				event->u.event.signaled = false;
			wake_up_process(q->task);
		}
	}
}
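
/*
 * All of the try_wake_any_* helpers above follow the same pattern; a
 * condensed sketch (illustration only, not a separate helper):
 *
 *	int signaled = -1;
 *
 *	if (object can satisfy this waiter &&
 *	    atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
 *		... consume the object state ...
 *		wake_up_process(q->task);
 *	}
 *
 * Because only one thread can win the compare-and-swap from -1, the
 * object state is consumed at most once per satisfied wait, and the
 * winning index is what the wait ioctl later reports to user space.
 */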

/*
 * Actually change the semaphore state, returning -EOVERFLOW if it is made
 * invalid.
 */
static int release_sem_state(struct ntsync_obj *sem, __u32 count)
{
	__u32 sum;

	ntsync_assert_held(sem);

	if (check_add_overflow(sem->u.sem.count, count, &sum) ||
	    sum > sem->u.sem.max)
		return -EOVERFLOW;

	sem->u.sem.count = sum;
	return 0;
}

static int ntsync_sem_release(struct ntsync_obj *sem, void __user *argp)
{
	struct ntsync_device *dev = sem->dev;
	__u32 __user *user_args = argp;
	__u32 prev_count;
	__u32 args;
	bool all;
	int ret;

	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;

	if (sem->type != NTSYNC_TYPE_SEM)
		return -EINVAL;

	all = ntsync_lock_obj(dev, sem);

	prev_count = sem->u.sem.count;
	ret = release_sem_state(sem, args);
	if (!ret) {
		if (all)
			try_wake_all_obj(dev, sem);
		try_wake_any_sem(sem);
	}

	ntsync_unlock_obj(dev, sem, all);

	if (!ret && put_user(prev_count, user_args))
		ret = -EFAULT;

	return ret;
}

/*
 * Actually change the mutex state, returning -EPERM if not the owner.
 */
static int unlock_mutex_state(struct ntsync_obj *mutex,
			      const struct ntsync_mutex_args *args)
{
	ntsync_assert_held(mutex);

	if (mutex->u.mutex.owner != args->owner)
		return -EPERM;

	if (!--mutex->u.mutex.count)
		mutex->u.mutex.owner = 0;
	return 0;
}

static int ntsync_mutex_unlock(struct ntsync_obj *mutex, void __user *argp)
{
	struct ntsync_mutex_args __user *user_args = argp;
	struct ntsync_device *dev = mutex->dev;
	struct ntsync_mutex_args args;
	__u32 prev_count;
	bool all;
	int ret;

	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;
	if (!args.owner)
		return -EINVAL;

	if (mutex->type != NTSYNC_TYPE_MUTEX)
		return -EINVAL;

	all = ntsync_lock_obj(dev, mutex);

	prev_count = mutex->u.mutex.count;
	ret = unlock_mutex_state(mutex, &args);
	if (!ret) {
		if (all)
			try_wake_all_obj(dev, mutex);
		try_wake_any_mutex(mutex);
	}

	ntsync_unlock_obj(dev, mutex, all);

	if (!ret && put_user(prev_count, &user_args->count))
		ret = -EFAULT;

	return ret;
}
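
/*
 * The mutex is recursive: each successful wait by the owning thread
 * increments u.mutex.count, and each NTSYNC_IOC_MUTEX_UNLOCK by that
 * owner decrements it, clearing the owner only when the count reaches
 * zero. A sketch of the resulting user-space pairing (illustration
 * only; wait_args.owner and mutex_args.owner hold the same thread id):
 *
 *	ioctl(dev, NTSYNC_IOC_WAIT_ANY, &wait_args);        acquire, count = 1
 *	ioctl(dev, NTSYNC_IOC_WAIT_ANY, &wait_args);        reacquire, count = 2
 *	ioctl(mutex, NTSYNC_IOC_MUTEX_UNLOCK, &mutex_args); count = 1
 *	ioctl(mutex, NTSYNC_IOC_MUTEX_UNLOCK, &mutex_args); count = 0, unowned
 */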

/*
 * Actually change the mutex state to mark its owner as dead,
 * returning -EPERM if not the owner.
 */
static int kill_mutex_state(struct ntsync_obj *mutex, __u32 owner)
{
	ntsync_assert_held(mutex);

	if (mutex->u.mutex.owner != owner)
		return -EPERM;

	mutex->u.mutex.ownerdead = true;
	mutex->u.mutex.owner = 0;
	mutex->u.mutex.count = 0;
	return 0;
}

static int ntsync_mutex_kill(struct ntsync_obj *mutex, void __user *argp)
{
	struct ntsync_device *dev = mutex->dev;
	__u32 owner;
	bool all;
	int ret;

	if (get_user(owner, (__u32 __user *)argp))
		return -EFAULT;
	if (!owner)
		return -EINVAL;

	if (mutex->type != NTSYNC_TYPE_MUTEX)
		return -EINVAL;

	all = ntsync_lock_obj(dev, mutex);

	ret = kill_mutex_state(mutex, owner);
	if (!ret) {
		if (all)
			try_wake_all_obj(dev, mutex);
		try_wake_any_mutex(mutex);
	}

	ntsync_unlock_obj(dev, mutex, all);

	return ret;
}

static int ntsync_event_set(struct ntsync_obj *event, void __user *argp)
{
	struct ntsync_device *dev = event->dev;
	__u32 prev_state;
	bool all;

	if (event->type != NTSYNC_TYPE_EVENT)
		return -EINVAL;

	all = ntsync_lock_obj(dev, event);

	prev_state = event->u.event.signaled;
	event->u.event.signaled = true;
	if (all)
		try_wake_all_obj(dev, event);
	try_wake_any_event(event);

	ntsync_unlock_obj(dev, event, all);

	if (put_user(prev_state, (__u32 __user *)argp))
		return -EFAULT;

	return 0;
}

static int ntsync_event_reset(struct ntsync_obj *event, void __user *argp)
{
	struct ntsync_device *dev = event->dev;
	__u32 prev_state;
	bool all;

	if (event->type != NTSYNC_TYPE_EVENT)
		return -EINVAL;

	all = ntsync_lock_obj(dev, event);

	prev_state = event->u.event.signaled;
	event->u.event.signaled = false;

	ntsync_unlock_obj(dev, event, all);

	if (put_user(prev_state, (__u32 __user *)argp))
		return -EFAULT;

	return 0;
}

static int ntsync_obj_release(struct inode *inode, struct file *file)
{
	struct ntsync_obj *obj = file->private_data;

	fput(obj->dev->file);
	kfree(obj);

	return 0;
}

static long ntsync_obj_ioctl(struct file *file, unsigned int cmd,
			     unsigned long parm)
{
	struct ntsync_obj *obj = file->private_data;
	void __user *argp = (void __user *)parm;

	switch (cmd) {
	case NTSYNC_IOC_SEM_RELEASE:
		return ntsync_sem_release(obj, argp);
	case NTSYNC_IOC_MUTEX_UNLOCK:
		return ntsync_mutex_unlock(obj, argp);
	case NTSYNC_IOC_MUTEX_KILL:
		return ntsync_mutex_kill(obj, argp);
	case NTSYNC_IOC_EVENT_SET:
		return ntsync_event_set(obj, argp);
	case NTSYNC_IOC_EVENT_RESET:
		return ntsync_event_reset(obj, argp);
	default:
		return -ENOIOCTLCMD;
	}
}

static const struct file_operations ntsync_obj_fops = {
	.owner		= THIS_MODULE,
	.release	= ntsync_obj_release,
	.unlocked_ioctl	= ntsync_obj_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};

static struct ntsync_obj *ntsync_alloc_obj(struct ntsync_device *dev,
					   enum ntsync_type type)
{
	struct ntsync_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;
	obj->type = type;
	obj->dev = dev;
	get_file(dev->file);
	spin_lock_init(&obj->lock);
	INIT_LIST_HEAD(&obj->any_waiters);
	INIT_LIST_HEAD(&obj->all_waiters);
	atomic_set(&obj->all_hint, 0);

	return obj;
}

static int ntsync_obj_get_fd(struct ntsync_obj *obj)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;
	file = anon_inode_getfile("ntsync", &ntsync_obj_fops, obj, O_RDWR);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}
	obj->file = file;
	fd_install(fd, file);

	return fd;
}

static int ntsync_create_sem(struct ntsync_device *dev, void __user *argp)
{
	struct ntsync_sem_args args;
	struct ntsync_obj *sem;
	int fd;

	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;

	if (args.count > args.max)
		return -EINVAL;

	sem = ntsync_alloc_obj(dev, NTSYNC_TYPE_SEM);
	if (!sem)
		return -ENOMEM;
	sem->u.sem.count = args.count;
	sem->u.sem.max = args.max;
	fd = ntsync_obj_get_fd(sem);
	if (fd < 0)
		kfree(sem);

	return fd;
}

static int ntsync_create_mutex(struct ntsync_device *dev, void __user *argp)
{
	struct ntsync_mutex_args args;
	struct ntsync_obj *mutex;
	int fd;

	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;

	if (!args.owner != !args.count)
		return -EINVAL;

	mutex = ntsync_alloc_obj(dev, NTSYNC_TYPE_MUTEX);
	if (!mutex)
		return -ENOMEM;
	mutex->u.mutex.count = args.count;
	mutex->u.mutex.owner = args.owner;
	fd = ntsync_obj_get_fd(mutex);
	if (fd < 0)
		kfree(mutex);

	return fd;
}

static int ntsync_create_event(struct ntsync_device *dev, void __user *argp)
{
	struct ntsync_event_args args;
	struct ntsync_obj *event;
	int fd;

	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;

	event = ntsync_alloc_obj(dev, NTSYNC_TYPE_EVENT);
	if (!event)
		return -ENOMEM;
	event->u.event.manual = args.manual;
	event->u.event.signaled = args.signaled;
	fd = ntsync_obj_get_fd(event);
	if (fd < 0)
		kfree(event);

	return fd;
}

static struct ntsync_obj *get_obj(struct ntsync_device *dev, int fd)
{
	struct file *file = fget(fd);
	struct ntsync_obj *obj;

	if (!file)
		return NULL;

	if (file->f_op != &ntsync_obj_fops) {
		fput(file);
		return NULL;
	}

	obj = file->private_data;
	if (obj->dev != dev) {
		fput(file);
		return NULL;
	}

	return obj;
}

static void put_obj(struct ntsync_obj *obj)
{
	fput(obj->file);
}

static int ntsync_schedule(const struct ntsync_q *q, const struct ntsync_wait_args *args)
{
	ktime_t timeout = ns_to_ktime(args->timeout);
	clockid_t clock = CLOCK_MONOTONIC;
	ktime_t *timeout_ptr;
	int ret = 0;

	timeout_ptr = (args->timeout == U64_MAX ? NULL : &timeout);

	if (args->flags & NTSYNC_WAIT_REALTIME)
		clock = CLOCK_REALTIME;

	do {
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		set_current_state(TASK_INTERRUPTIBLE);
		if (atomic_read(&q->signaled) != -1) {
			ret = 0;
			break;
		}
		ret = schedule_hrtimeout_range_clock(timeout_ptr, 0, HRTIMER_MODE_ABS, clock);
	} while (ret < 0);
	__set_current_state(TASK_RUNNING);

	return ret;
}
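
/*
 * Note on the timeout convention used by ntsync_schedule() above:
 * args->timeout is an absolute time in nanoseconds (HRTIMER_MODE_ABS)
 * on CLOCK_MONOTONIC, or on CLOCK_REALTIME if NTSYNC_WAIT_REALTIME is
 * set, and U64_MAX means "no timeout". A sketch of how user space might
 * build a 5 ms deadline (illustration only, assuming clock_gettime()):
 *
 *	struct timespec now;
 *	clock_gettime(CLOCK_MONOTONIC, &now);
 *	wait_args.timeout = now.tv_sec * 1000000000ull + now.tv_nsec
 *			    + 5 * 1000000ull;
 */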

/*
 * Allocate and initialize the ntsync_q structure, but do not queue us yet.
 */
static int setup_wait(struct ntsync_device *dev,
		      const struct ntsync_wait_args *args, bool all,
		      struct ntsync_q **ret_q)
{
	const __u32 count = args->count;
	int fds[NTSYNC_MAX_WAIT_COUNT];
	struct ntsync_q *q;
	__u32 i, j;

	if (args->pad[0] || args->pad[1] || (args->flags & ~NTSYNC_WAIT_REALTIME))
		return -EINVAL;

	if (args->count > NTSYNC_MAX_WAIT_COUNT)
		return -EINVAL;

	if (copy_from_user(fds, u64_to_user_ptr(args->objs),
			   array_size(count, sizeof(*fds))))
		return -EFAULT;

	q = kmalloc(struct_size(q, entries, count), GFP_KERNEL);
	if (!q)
		return -ENOMEM;
	q->task = current;
	q->owner = args->owner;
	atomic_set(&q->signaled, -1);
	q->all = all;
	q->ownerdead = false;
	q->count = count;

	for (i = 0; i < count; i++) {
		struct ntsync_q_entry *entry = &q->entries[i];
		struct ntsync_obj *obj = get_obj(dev, fds[i]);

		if (!obj)
			goto err;

		if (all) {
			/* Check that the objects are all distinct. */
			for (j = 0; j < i; j++) {
				if (obj == q->entries[j].obj) {
					put_obj(obj);
					goto err;
				}
			}
		}

		entry->obj = obj;
		entry->q = q;
		entry->index = i;
	}

	*ret_q = q;
	return 0;

err:
	for (j = 0; j < i; j++)
		put_obj(q->entries[j].obj);
	kfree(q);
	return -EINVAL;
}

static void try_wake_any_obj(struct ntsync_obj *obj)
{
	switch (obj->type) {
	case NTSYNC_TYPE_SEM:
		try_wake_any_sem(obj);
		break;
	case NTSYNC_TYPE_MUTEX:
		try_wake_any_mutex(obj);
		break;
	case NTSYNC_TYPE_EVENT:
		try_wake_any_event(obj);
		break;
	}
}

static int ntsync_wait_any(struct ntsync_device *dev, void __user *argp)
{
	struct ntsync_wait_args args;
	struct ntsync_q *q;
	int signaled;
	bool all;
	__u32 i;
	int ret;

	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;

	ret = setup_wait(dev, &args, false, &q);
	if (ret < 0)
		return ret;

	/* queue ourselves */

	for (i = 0; i < args.count; i++) {
		struct ntsync_q_entry *entry = &q->entries[i];
		struct ntsync_obj *obj = entry->obj;

		all = ntsync_lock_obj(dev, obj);
		list_add_tail(&entry->node, &obj->any_waiters);
		ntsync_unlock_obj(dev, obj, all);
	}

	/* check if we are already signaled */

	for (i = 0; i < args.count; i++) {
		struct ntsync_obj *obj = q->entries[i].obj;

		if (atomic_read(&q->signaled) != -1)
			break;

		all = ntsync_lock_obj(dev, obj);
		try_wake_any_obj(obj);
		ntsync_unlock_obj(dev, obj, all);
	}

	/* sleep */

	ret = ntsync_schedule(q, &args);

	/* and finally, unqueue */

	for (i = 0; i < args.count; i++) {
		struct ntsync_q_entry *entry = &q->entries[i];
		struct ntsync_obj *obj = entry->obj;

		all = ntsync_lock_obj(dev, obj);
		list_del(&entry->node);
		ntsync_unlock_obj(dev, obj, all);

		put_obj(obj);
	}

	signaled = atomic_read(&q->signaled);
	if (signaled != -1) {
		struct ntsync_wait_args __user *user_args = argp;

		/* even if we caught a signal, we need to communicate success */
		ret = q->ownerdead ? -EOWNERDEAD : 0;

		if (put_user(signaled, &user_args->index))
			ret = -EFAULT;
	} else if (!ret) {
		ret = -ETIMEDOUT;
	}

	kfree(q);
	return ret;
}

static int ntsync_wait_all(struct ntsync_device *dev, void __user *argp)
{
	struct ntsync_wait_args args;
	struct ntsync_q *q;
	int signaled;
	__u32 i;
	int ret;

	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;

	ret = setup_wait(dev, &args, true, &q);
	if (ret < 0)
		return ret;

	/* queue ourselves */

	mutex_lock(&dev->wait_all_lock);

	for (i = 0; i < args.count; i++) {
		struct ntsync_q_entry *entry = &q->entries[i];
		struct ntsync_obj *obj = entry->obj;

		atomic_inc(&obj->all_hint);

		/*
		 * obj->all_waiters is protected by dev->wait_all_lock rather
		 * than obj->lock, so there is no need to acquire obj->lock
		 * here.
		 */
		list_add_tail(&entry->node, &obj->all_waiters);
	}

	/* check if we are already signaled */

	try_wake_all(dev, q, NULL);

	mutex_unlock(&dev->wait_all_lock);

	/* sleep */

	ret = ntsync_schedule(q, &args);

	/* and finally, unqueue */

	mutex_lock(&dev->wait_all_lock);

	for (i = 0; i < args.count; i++) {
		struct ntsync_q_entry *entry = &q->entries[i];
		struct ntsync_obj *obj = entry->obj;

		/*
		 * obj->all_waiters is protected by dev->wait_all_lock rather
		 * than obj->lock, so there is no need to acquire it here.
		 */
		list_del(&entry->node);

		atomic_dec(&obj->all_hint);

		put_obj(obj);
	}

	mutex_unlock(&dev->wait_all_lock);

	signaled = atomic_read(&q->signaled);
	if (signaled != -1) {
		struct ntsync_wait_args __user *user_args = argp;

		/* even if we caught a signal, we need to communicate success */
		ret = q->ownerdead ? -EOWNERDEAD : 0;

		if (put_user(signaled, &user_args->index))
			ret = -EFAULT;
	} else if (!ret) {
		ret = -ETIMEDOUT;
	}

	kfree(q);
	return ret;
}

static int ntsync_char_open(struct inode *inode, struct file *file)
{
	struct ntsync_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	mutex_init(&dev->wait_all_lock);

	file->private_data = dev;
	dev->file = file;
	return nonseekable_open(inode, file);
}

static int ntsync_char_release(struct inode *inode, struct file *file)
{
	struct ntsync_device *dev = file->private_data;

	kfree(dev);

	return 0;
}

static long ntsync_char_ioctl(struct file *file, unsigned int cmd,
			      unsigned long parm)
{
	struct ntsync_device *dev = file->private_data;
	void __user *argp = (void __user *)parm;

	switch (cmd) {
	case NTSYNC_IOC_CREATE_EVENT:
		return ntsync_create_event(dev, argp);
	case NTSYNC_IOC_CREATE_MUTEX:
		return ntsync_create_mutex(dev, argp);
	case NTSYNC_IOC_CREATE_SEM:
		return ntsync_create_sem(dev, argp);
	case NTSYNC_IOC_WAIT_ALL:
		return ntsync_wait_all(dev, argp);
	case NTSYNC_IOC_WAIT_ANY:
		return ntsync_wait_any(dev, argp);
	default:
		return -ENOIOCTLCMD;
	}
}

static const struct file_operations ntsync_fops = {
	.owner		= THIS_MODULE,
	.open		= ntsync_char_open,
	.release	= ntsync_char_release,
	.unlocked_ioctl	= ntsync_char_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};

static struct miscdevice ntsync_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= NTSYNC_NAME,
	.fops		= &ntsync_fops,
};

module_misc_device(ntsync_misc);

MODULE_AUTHOR("Elizabeth Figura <[email protected]>");
MODULE_DESCRIPTION("Kernel driver for NT synchronization primitives");
MODULE_LICENSE("GPL");