xref: /linux-6.15/drivers/misc/ntsync.c (revision 2dcba6fc)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * ntsync.c - Kernel driver for NT synchronization primitives
4  *
5  * Copyright (C) 2024 Elizabeth Figura <[email protected]>
6  */
7 
8 #include <linux/anon_inodes.h>
9 #include <linux/atomic.h>
10 #include <linux/file.h>
11 #include <linux/fs.h>
12 #include <linux/hrtimer.h>
13 #include <linux/ktime.h>
14 #include <linux/miscdevice.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/overflow.h>
18 #include <linux/sched.h>
19 #include <linux/sched/signal.h>
20 #include <linux/slab.h>
21 #include <linux/spinlock.h>
22 #include <uapi/linux/ntsync.h>
23 
24 #define NTSYNC_NAME	"ntsync"
25 
26 enum ntsync_type {
27 	NTSYNC_TYPE_SEM,
28 	NTSYNC_TYPE_MUTEX,
29 	NTSYNC_TYPE_EVENT,
30 };
31 
32 /*
33  * Individual synchronization primitives are represented by
34  * struct ntsync_obj, and each primitive is backed by a file.
35  *
36  * The whole namespace is represented by a struct ntsync_device also
37  * backed by a file.
38  *
39  * Both rely on struct file for reference counting. Individual
40  * ntsync_obj objects take a reference to the device when created.
41  * Wait operations take a reference to each object being waited on for
42  * the duration of the wait.
43  */
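
/*
 * Illustrative userspace sketch (not part of the driver): each primitive is
 * created through an ioctl on the device fd and comes back as its own file
 * descriptor, and the object keeps the device file alive, so the object fd
 * and its ioctls stay valid even after the device fd is closed. Error
 * handling and the usual <fcntl.h>/<sys/ioctl.h>/<linux/ntsync.h> includes
 * are omitted:
 *
 *	int dev = open("/dev/ntsync", O_RDWR | O_CLOEXEC);
 *	struct ntsync_sem_args sem_args = { .count = 0, .max = 5 };
 *	int sem = ioctl(dev, NTSYNC_IOC_CREATE_SEM, &sem_args);
 *
 *	close(dev);	// the semaphore fd and its ioctls remain usable
 *	close(sem);	// dropping the last reference frees the object
 */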
44 
45 struct ntsync_obj {
46 	spinlock_t lock;
47 	int dev_locked;
48 
49 	enum ntsync_type type;
50 
51 	struct file *file;
52 	struct ntsync_device *dev;
53 
54 	/* The following fields are protected by the object lock. */
55 	union {
56 		struct {
57 			__u32 count;
58 			__u32 max;
59 		} sem;
60 		struct {
61 			__u32 count;
62 			pid_t owner;
63 			bool ownerdead;
64 		} mutex;
65 		struct {
66 			bool manual;
67 			bool signaled;
68 		} event;
69 	} u;
70 
71 	/*
72 	 * any_waiters is protected by the object lock, but all_waiters is
73 	 * protected by the device wait_all_lock.
74 	 */
75 	struct list_head any_waiters;
76 	struct list_head all_waiters;
77 
78 	/*
79 	 * Hint describing how many tasks are queued on this object in a
80 	 * wait-all operation.
81 	 *
82 	 * Any time we do a wake, we may need to wake "all" waiters as well as
83 	 * "any" waiters. In order to atomically wake "all" waiters, we must
84 	 * lock all of the objects, and that means grabbing the wait_all_lock
85 	 * below (and, due to lock ordering rules, before locking this object).
86 	 * However, wait-all is a rare operation, and grabbing the wait-all
87 	 * lock for every wake would create unnecessary contention.
88 	 * Therefore we first check whether all_hint is zero, and, if it is,
89 	 * we skip trying to wake "all" waiters.
90 	 *
91 	 * Since wait requests must originate from user-space threads, we're
92 	 * limited here by PID_MAX_LIMIT, so there's no risk of overflow.
93 	 */
94 	atomic_t all_hint;
95 };
96 
97 struct ntsync_q_entry {
98 	struct list_head node;
99 	struct ntsync_q *q;
100 	struct ntsync_obj *obj;
101 	__u32 index;
102 };
103 
104 struct ntsync_q {
105 	struct task_struct *task;
106 	__u32 owner;
107 
108 	/*
109 	 * Protected via atomic_try_cmpxchg(). Only the thread that wins the
110 	 * compare-and-swap may actually change object states and wake this
111 	 * task.
112 	 */
113 	atomic_t signaled;
114 
115 	bool all;
116 	bool ownerdead;
117 	__u32 count;
118 	struct ntsync_q_entry entries[];
119 };
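
/*
 * A condensed sketch of the q->signaled hand-off used throughout this file
 * (illustrative only, mirroring the wake paths below): the waiter initializes
 * the field to -1, and every waker tries to compare-and-swap it to the index
 * of the object being signaled. Exactly one waker can win, and only that
 * winner may consume object state and wake the task:
 *
 *	int expected = -1;
 *
 *	if (atomic_try_cmpxchg(&q->signaled, &expected, entry->index)) {
 *		// sole winner: update the object's state, then wake the waiter
 *		wake_up_process(q->task);
 *	}
 */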
120 
121 struct ntsync_device {
122 	/*
123 	 * Wait-all operations must atomically grab all objects, and be totally
124 	 * ordered with respect to each other and wait-any operations.
125 	 * If one thread is trying to acquire several objects, another thread
126 	 * cannot touch any of those objects at the same time.
127 	 *
128 	 * This device-wide lock is used to serialize wait-for-all
129 	 * operations, and operations on an object that is involved in a
130 	 * wait-for-all.
131 	 */
132 	struct mutex wait_all_lock;
133 
134 	struct file *file;
135 };
136 
137 /*
138  * Single objects are locked using obj->lock.
139  *
140  * Multiple objects are 'locked' while holding dev->wait_all_lock.
141  * In this case however, individual objects are not locked by holding
142  * obj->lock, but by setting obj->dev_locked.
143  *
144  * This means that in order to lock a single object, the sequence is slightly
145  * more complicated than usual. Specifically, it needs to check obj->dev_locked
146  * after acquiring obj->lock; if that is set, it needs to drop the lock and acquire
147  * dev->wait_all_lock in order to serialize against the multi-object operation.
148  */
149 
150 static void dev_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
151 {
152 	lockdep_assert_held(&dev->wait_all_lock);
153 	lockdep_assert(obj->dev == dev);
154 	spin_lock(&obj->lock);
155 	/*
156 	 * Setting obj->dev_locked while holding obj->lock ensures that anyone
157 	 * who subsequently acquires obj->lock will see the value.
158 	 */
159 	obj->dev_locked = 1;
160 	spin_unlock(&obj->lock);
161 }
162 
163 static void dev_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
164 {
165 	lockdep_assert_held(&dev->wait_all_lock);
166 	lockdep_assert(obj->dev == dev);
167 	spin_lock(&obj->lock);
168 	obj->dev_locked = 0;
169 	spin_unlock(&obj->lock);
170 }
171 
172 static void obj_lock(struct ntsync_obj *obj)
173 {
174 	struct ntsync_device *dev = obj->dev;
175 
176 	for (;;) {
177 		spin_lock(&obj->lock);
178 		if (likely(!obj->dev_locked))
179 			break;
180 
181 		spin_unlock(&obj->lock);
182 		mutex_lock(&dev->wait_all_lock);
183 		spin_lock(&obj->lock);
184 		/*
185 		 * obj->dev_locked is set and cleared within the same
186 		 * wait_all_lock section; since we now own that lock, it
187 		 * must be clear.
188 		 */
189 		lockdep_assert(!obj->dev_locked);
190 		spin_unlock(&obj->lock);
191 		mutex_unlock(&dev->wait_all_lock);
192 	}
193 }
194 
195 static void obj_unlock(struct ntsync_obj *obj)
196 {
197 	spin_unlock(&obj->lock);
198 }
199 
200 static bool ntsync_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
201 {
202 	bool all;
203 
204 	obj_lock(obj);
205 	all = atomic_read(&obj->all_hint);
206 	if (unlikely(all)) {
207 		obj_unlock(obj);
208 		mutex_lock(&dev->wait_all_lock);
209 		dev_lock_obj(dev, obj);
210 	}
211 
212 	return all;
213 }
214 
215 static void ntsync_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj, bool all)
216 {
217 	if (all) {
218 		dev_unlock_obj(dev, obj);
219 		mutex_unlock(&dev->wait_all_lock);
220 	} else {
221 		obj_unlock(obj);
222 	}
223 }
224 
225 #define ntsync_assert_held(obj) \
226 	lockdep_assert((lockdep_is_held(&(obj)->lock) != LOCK_STATE_NOT_HELD) || \
227 		       ((lockdep_is_held(&(obj)->dev->wait_all_lock) != LOCK_STATE_NOT_HELD) && \
228 			(obj)->dev_locked))
229 
230 static bool is_signaled(struct ntsync_obj *obj, __u32 owner)
231 {
232 	ntsync_assert_held(obj);
233 
234 	switch (obj->type) {
235 	case NTSYNC_TYPE_SEM:
236 		return !!obj->u.sem.count;
237 	case NTSYNC_TYPE_MUTEX:
238 		if (obj->u.mutex.owner && obj->u.mutex.owner != owner)
239 			return false;
240 		return obj->u.mutex.count < UINT_MAX;
241 	case NTSYNC_TYPE_EVENT:
242 		return obj->u.event.signaled;
243 	}
244 
245 	WARN(1, "bad object type %#x\n", obj->type);
246 	return false;
247 }
248 
249 /*
250  * "locked_obj" is an optional pointer to an object which is already locked and
251  * should not be locked again. This is necessary so that changing an object's
252  * state and waking it can be a single atomic operation.
253  */
254 static void try_wake_all(struct ntsync_device *dev, struct ntsync_q *q,
255 			 struct ntsync_obj *locked_obj)
256 {
257 	__u32 count = q->count;
258 	bool can_wake = true;
259 	int signaled = -1;
260 	__u32 i;
261 
262 	lockdep_assert_held(&dev->wait_all_lock);
263 	if (locked_obj)
264 		lockdep_assert(locked_obj->dev_locked);
265 
266 	for (i = 0; i < count; i++) {
267 		if (q->entries[i].obj != locked_obj)
268 			dev_lock_obj(dev, q->entries[i].obj);
269 	}
270 
271 	for (i = 0; i < count; i++) {
272 		if (!is_signaled(q->entries[i].obj, q->owner)) {
273 			can_wake = false;
274 			break;
275 		}
276 	}
277 
278 	if (can_wake && atomic_try_cmpxchg(&q->signaled, &signaled, 0)) {
279 		for (i = 0; i < count; i++) {
280 			struct ntsync_obj *obj = q->entries[i].obj;
281 
282 			switch (obj->type) {
283 			case NTSYNC_TYPE_SEM:
284 				obj->u.sem.count--;
285 				break;
286 			case NTSYNC_TYPE_MUTEX:
287 				if (obj->u.mutex.ownerdead)
288 					q->ownerdead = true;
289 				obj->u.mutex.ownerdead = false;
290 				obj->u.mutex.count++;
291 				obj->u.mutex.owner = q->owner;
292 				break;
293 			case NTSYNC_TYPE_EVENT:
294 				if (!obj->u.event.manual)
295 					obj->u.event.signaled = false;
296 				break;
297 			}
298 		}
299 		wake_up_process(q->task);
300 	}
301 
302 	for (i = 0; i < count; i++) {
303 		if (q->entries[i].obj != locked_obj)
304 			dev_unlock_obj(dev, q->entries[i].obj);
305 	}
306 }
307 
308 static void try_wake_all_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
309 {
310 	struct ntsync_q_entry *entry;
311 
312 	lockdep_assert_held(&dev->wait_all_lock);
313 	lockdep_assert(obj->dev_locked);
314 
315 	list_for_each_entry(entry, &obj->all_waiters, node)
316 		try_wake_all(dev, entry->q, obj);
317 }
318 
319 static void try_wake_any_sem(struct ntsync_obj *sem)
320 {
321 	struct ntsync_q_entry *entry;
322 
323 	ntsync_assert_held(sem);
324 	lockdep_assert(sem->type == NTSYNC_TYPE_SEM);
325 
326 	list_for_each_entry(entry, &sem->any_waiters, node) {
327 		struct ntsync_q *q = entry->q;
328 		int signaled = -1;
329 
330 		if (!sem->u.sem.count)
331 			break;
332 
333 		if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
334 			sem->u.sem.count--;
335 			wake_up_process(q->task);
336 		}
337 	}
338 }
339 
340 static void try_wake_any_mutex(struct ntsync_obj *mutex)
341 {
342 	struct ntsync_q_entry *entry;
343 
344 	ntsync_assert_held(mutex);
345 	lockdep_assert(mutex->type == NTSYNC_TYPE_MUTEX);
346 
347 	list_for_each_entry(entry, &mutex->any_waiters, node) {
348 		struct ntsync_q *q = entry->q;
349 		int signaled = -1;
350 
351 		if (mutex->u.mutex.count == UINT_MAX)
352 			break;
353 		if (mutex->u.mutex.owner && mutex->u.mutex.owner != q->owner)
354 			continue;
355 
356 		if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
357 			if (mutex->u.mutex.ownerdead)
358 				q->ownerdead = true;
359 			mutex->u.mutex.ownerdead = false;
360 			mutex->u.mutex.count++;
361 			mutex->u.mutex.owner = q->owner;
362 			wake_up_process(q->task);
363 		}
364 	}
365 }
366 
367 static void try_wake_any_event(struct ntsync_obj *event)
368 {
369 	struct ntsync_q_entry *entry;
370 
371 	ntsync_assert_held(event);
372 	lockdep_assert(event->type == NTSYNC_TYPE_EVENT);
373 
374 	list_for_each_entry(entry, &event->any_waiters, node) {
375 		struct ntsync_q *q = entry->q;
376 		int signaled = -1;
377 
378 		if (!event->u.event.signaled)
379 			break;
380 
381 		if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
382 			if (!event->u.event.manual)
383 				event->u.event.signaled = false;
384 			wake_up_process(q->task);
385 		}
386 	}
387 }
388 
389 /*
390  * Actually change the semaphore state, returning -EOVERFLOW if the new count
391  * would be invalid; in that case the state is left unchanged.
392  */
393 static int release_sem_state(struct ntsync_obj *sem, __u32 count)
394 {
395 	__u32 sum;
396 
397 	ntsync_assert_held(sem);
398 
399 	if (check_add_overflow(sem->u.sem.count, count, &sum) ||
400 	    sum > sem->u.sem.max)
401 		return -EOVERFLOW;
402 
403 	sem->u.sem.count = sum;
404 	return 0;
405 }
406 
407 static int ntsync_sem_release(struct ntsync_obj *sem, void __user *argp)
408 {
409 	struct ntsync_device *dev = sem->dev;
410 	__u32 __user *user_args = argp;
411 	__u32 prev_count;
412 	__u32 args;
413 	bool all;
414 	int ret;
415 
416 	if (copy_from_user(&args, argp, sizeof(args)))
417 		return -EFAULT;
418 
419 	if (sem->type != NTSYNC_TYPE_SEM)
420 		return -EINVAL;
421 
422 	all = ntsync_lock_obj(dev, sem);
423 
424 	prev_count = sem->u.sem.count;
425 	ret = release_sem_state(sem, args);
426 	if (!ret) {
427 		if (all)
428 			try_wake_all_obj(dev, sem);
429 		try_wake_any_sem(sem);
430 	}
431 
432 	ntsync_unlock_obj(dev, sem, all);
433 
434 	if (!ret && put_user(prev_count, user_args))
435 		ret = -EFAULT;
436 
437 	return ret;
438 }
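
/*
 * Illustrative userspace sketch (assuming sem_fd came from
 * NTSYNC_IOC_CREATE_SEM): the argument is a single __u32 post count, and on
 * success the previous count is written back through the same pointer. If the
 * release would push the count past the semaphore's maximum, -EOVERFLOW is
 * returned and the state is left untouched:
 *
 *	__u32 count = 1;
 *
 *	if (ioctl(sem_fd, NTSYNC_IOC_SEM_RELEASE, &count) == 0)
 *		printf("count before release: %u\n", count);
 */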
439 
440 /*
441  * Actually change the mutex state, returning -EPERM if not the owner.
442  */
443 static int unlock_mutex_state(struct ntsync_obj *mutex,
444 			      const struct ntsync_mutex_args *args)
445 {
446 	ntsync_assert_held(mutex);
447 
448 	if (mutex->u.mutex.owner != args->owner)
449 		return -EPERM;
450 
451 	if (!--mutex->u.mutex.count)
452 		mutex->u.mutex.owner = 0;
453 	return 0;
454 }
455 
456 static int ntsync_mutex_unlock(struct ntsync_obj *mutex, void __user *argp)
457 {
458 	struct ntsync_mutex_args __user *user_args = argp;
459 	struct ntsync_device *dev = mutex->dev;
460 	struct ntsync_mutex_args args;
461 	__u32 prev_count;
462 	bool all;
463 	int ret;
464 
465 	if (copy_from_user(&args, argp, sizeof(args)))
466 		return -EFAULT;
467 	if (!args.owner)
468 		return -EINVAL;
469 
470 	if (mutex->type != NTSYNC_TYPE_MUTEX)
471 		return -EINVAL;
472 
473 	all = ntsync_lock_obj(dev, mutex);
474 
475 	prev_count = mutex->u.mutex.count;
476 	ret = unlock_mutex_state(mutex, &args);
477 	if (!ret) {
478 		if (all)
479 			try_wake_all_obj(dev, mutex);
480 		try_wake_any_mutex(mutex);
481 	}
482 
483 	ntsync_unlock_obj(dev, mutex, all);
484 
485 	if (!ret && put_user(prev_count, &user_args->count))
486 		ret = -EFAULT;
487 
488 	return ret;
489 }
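
/*
 * Illustrative userspace sketch (assuming mutex_fd came from
 * NTSYNC_IOC_CREATE_MUTEX and "tid" is whatever nonzero owner identifier this
 * thread used to acquire the mutex): the recursion count is decremented, the
 * previous count is written back into args.count, and -EPERM is returned if
 * the given owner does not actually hold the mutex:
 *
 *	struct ntsync_mutex_args args = { .owner = tid };
 *
 *	if (ioctl(mutex_fd, NTSYNC_IOC_MUTEX_UNLOCK, &args) == 0)
 *		printf("recursion count was %u\n", args.count);
 */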
490 
491 /*
492  * Actually change the mutex state to mark its owner as dead,
493  * returning -EPERM if not the owner.
494  */
495 static int kill_mutex_state(struct ntsync_obj *mutex, __u32 owner)
496 {
497 	ntsync_assert_held(mutex);
498 
499 	if (mutex->u.mutex.owner != owner)
500 		return -EPERM;
501 
502 	mutex->u.mutex.ownerdead = true;
503 	mutex->u.mutex.owner = 0;
504 	mutex->u.mutex.count = 0;
505 	return 0;
506 }
507 
508 static int ntsync_mutex_kill(struct ntsync_obj *mutex, void __user *argp)
509 {
510 	struct ntsync_device *dev = mutex->dev;
511 	__u32 owner;
512 	bool all;
513 	int ret;
514 
515 	if (get_user(owner, (__u32 __user *)argp))
516 		return -EFAULT;
517 	if (!owner)
518 		return -EINVAL;
519 
520 	if (mutex->type != NTSYNC_TYPE_MUTEX)
521 		return -EINVAL;
522 
523 	all = ntsync_lock_obj(dev, mutex);
524 
525 	ret = kill_mutex_state(mutex, owner);
526 	if (!ret) {
527 		if (all)
528 			try_wake_all_obj(dev, mutex);
529 		try_wake_any_mutex(mutex);
530 	}
531 
532 	ntsync_unlock_obj(dev, mutex, all);
533 
534 	return ret;
535 }
536 
537 static int ntsync_event_set(struct ntsync_obj *event, void __user *argp)
538 {
539 	struct ntsync_device *dev = event->dev;
540 	__u32 prev_state;
541 	bool all;
542 
543 	if (event->type != NTSYNC_TYPE_EVENT)
544 		return -EINVAL;
545 
546 	all = ntsync_lock_obj(dev, event);
547 
548 	prev_state = event->u.event.signaled;
549 	event->u.event.signaled = true;
550 	if (all)
551 		try_wake_all_obj(dev, event);
552 	try_wake_any_event(event);
553 
554 	ntsync_unlock_obj(dev, event, all);
555 
556 	if (put_user(prev_state, (__u32 __user *)argp))
557 		return -EFAULT;
558 
559 	return 0;
560 }
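
/*
 * Illustrative userspace sketch (assuming event_fd came from
 * NTSYNC_IOC_CREATE_EVENT): setting an event reports the previous signaled
 * state through the __u32 argument, so the caller can tell whether the event
 * was already set:
 *
 *	__u32 prev;
 *
 *	if (ioctl(event_fd, NTSYNC_IOC_EVENT_SET, &prev) == 0 && !prev)
 *		printf("event was previously unsignaled\n");
 */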
561 
562 static int ntsync_obj_release(struct inode *inode, struct file *file)
563 {
564 	struct ntsync_obj *obj = file->private_data;
565 
566 	fput(obj->dev->file);
567 	kfree(obj);
568 
569 	return 0;
570 }
571 
572 static long ntsync_obj_ioctl(struct file *file, unsigned int cmd,
573 			     unsigned long parm)
574 {
575 	struct ntsync_obj *obj = file->private_data;
576 	void __user *argp = (void __user *)parm;
577 
578 	switch (cmd) {
579 	case NTSYNC_IOC_SEM_RELEASE:
580 		return ntsync_sem_release(obj, argp);
581 	case NTSYNC_IOC_MUTEX_UNLOCK:
582 		return ntsync_mutex_unlock(obj, argp);
583 	case NTSYNC_IOC_MUTEX_KILL:
584 		return ntsync_mutex_kill(obj, argp);
585 	case NTSYNC_IOC_EVENT_SET:
586 		return ntsync_event_set(obj, argp);
587 	default:
588 		return -ENOIOCTLCMD;
589 	}
590 }
591 
592 static const struct file_operations ntsync_obj_fops = {
593 	.owner		= THIS_MODULE,
594 	.release	= ntsync_obj_release,
595 	.unlocked_ioctl	= ntsync_obj_ioctl,
596 	.compat_ioctl	= compat_ptr_ioctl,
597 };
598 
599 static struct ntsync_obj *ntsync_alloc_obj(struct ntsync_device *dev,
600 					   enum ntsync_type type)
601 {
602 	struct ntsync_obj *obj;
603 
604 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
605 	if (!obj)
606 		return NULL;
607 	obj->type = type;
608 	obj->dev = dev;
609 	get_file(dev->file);
610 	spin_lock_init(&obj->lock);
611 	INIT_LIST_HEAD(&obj->any_waiters);
612 	INIT_LIST_HEAD(&obj->all_waiters);
613 	atomic_set(&obj->all_hint, 0);
614 
615 	return obj;
616 }
617 
618 static int ntsync_obj_get_fd(struct ntsync_obj *obj)
619 {
620 	struct file *file;
621 	int fd;
622 
623 	fd = get_unused_fd_flags(O_CLOEXEC);
624 	if (fd < 0)
625 		return fd;
626 	file = anon_inode_getfile("ntsync", &ntsync_obj_fops, obj, O_RDWR);
627 	if (IS_ERR(file)) {
628 		put_unused_fd(fd);
629 		return PTR_ERR(file);
630 	}
631 	obj->file = file;
632 	fd_install(fd, file);
633 
634 	return fd;
635 }
636 
637 static int ntsync_create_sem(struct ntsync_device *dev, void __user *argp)
638 {
639 	struct ntsync_sem_args args;
640 	struct ntsync_obj *sem;
641 	int fd;
642 
643 	if (copy_from_user(&args, argp, sizeof(args)))
644 		return -EFAULT;
645 
646 	if (args.count > args.max)
647 		return -EINVAL;
648 
649 	sem = ntsync_alloc_obj(dev, NTSYNC_TYPE_SEM);
650 	if (!sem)
651 		return -ENOMEM;
652 	sem->u.sem.count = args.count;
653 	sem->u.sem.max = args.max;
654 	fd = ntsync_obj_get_fd(sem);
	if (fd < 0) {
		fput(dev->file);
		kfree(sem);
	}
657 
658 	return fd;
659 }
660 
661 static int ntsync_create_mutex(struct ntsync_device *dev, void __user *argp)
662 {
663 	struct ntsync_mutex_args args;
664 	struct ntsync_obj *mutex;
665 	int fd;
666 
667 	if (copy_from_user(&args, argp, sizeof(args)))
668 		return -EFAULT;
669 
670 	if (!args.owner != !args.count)
671 		return -EINVAL;
672 
673 	mutex = ntsync_alloc_obj(dev, NTSYNC_TYPE_MUTEX);
674 	if (!mutex)
675 		return -ENOMEM;
676 	mutex->u.mutex.count = args.count;
677 	mutex->u.mutex.owner = args.owner;
678 	fd = ntsync_obj_get_fd(mutex);
	if (fd < 0) {
		fput(dev->file);
		kfree(mutex);
	}
681 
682 	return fd;
683 }
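
/*
 * Illustrative userspace sketch: the owner/count validation above means a
 * mutex is created either completely unowned (both fields zero) or already
 * owned with a nonzero recursion count. Assuming dev_fd is the open device
 * and "tid" is the creating thread's owner identifier:
 *
 *	struct ntsync_mutex_args unowned = { .owner = 0, .count = 0 };
 *	struct ntsync_mutex_args owned = { .owner = tid, .count = 1 };
 *
 *	int mutex1 = ioctl(dev_fd, NTSYNC_IOC_CREATE_MUTEX, &unowned);
 *	int mutex2 = ioctl(dev_fd, NTSYNC_IOC_CREATE_MUTEX, &owned);
 */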
684 
685 static int ntsync_create_event(struct ntsync_device *dev, void __user *argp)
686 {
687 	struct ntsync_event_args args;
688 	struct ntsync_obj *event;
689 	int fd;
690 
691 	if (copy_from_user(&args, argp, sizeof(args)))
692 		return -EFAULT;
693 
694 	event = ntsync_alloc_obj(dev, NTSYNC_TYPE_EVENT);
695 	if (!event)
696 		return -ENOMEM;
697 	event->u.event.manual = args.manual;
698 	event->u.event.signaled = args.signaled;
699 	fd = ntsync_obj_get_fd(event);
	if (fd < 0) {
		fput(dev->file);
		kfree(event);
	}
702 
703 	return fd;
704 }
705 
706 static struct ntsync_obj *get_obj(struct ntsync_device *dev, int fd)
707 {
708 	struct file *file = fget(fd);
709 	struct ntsync_obj *obj;
710 
711 	if (!file)
712 		return NULL;
713 
714 	if (file->f_op != &ntsync_obj_fops) {
715 		fput(file);
716 		return NULL;
717 	}
718 
719 	obj = file->private_data;
720 	if (obj->dev != dev) {
721 		fput(file);
722 		return NULL;
723 	}
724 
725 	return obj;
726 }
727 
728 static void put_obj(struct ntsync_obj *obj)
729 {
730 	fput(obj->file);
731 }
732 
733 static int ntsync_schedule(const struct ntsync_q *q, const struct ntsync_wait_args *args)
734 {
735 	ktime_t timeout = ns_to_ktime(args->timeout);
736 	clockid_t clock = CLOCK_MONOTONIC;
737 	ktime_t *timeout_ptr;
738 	int ret = 0;
739 
740 	timeout_ptr = (args->timeout == U64_MAX ? NULL : &timeout);
741 
742 	if (args->flags & NTSYNC_WAIT_REALTIME)
743 		clock = CLOCK_REALTIME;
744 
745 	do {
746 		if (signal_pending(current)) {
747 			ret = -ERESTARTSYS;
748 			break;
749 		}
750 
751 		set_current_state(TASK_INTERRUPTIBLE);
752 		if (atomic_read(&q->signaled) != -1) {
753 			ret = 0;
754 			break;
755 		}
756 		ret = schedule_hrtimeout_range_clock(timeout_ptr, 0, HRTIMER_MODE_ABS, clock);
757 	} while (ret < 0);
758 	__set_current_state(TASK_RUNNING);
759 
760 	return ret;
761 }
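
/*
 * Illustrative userspace sketch: the timeout in struct ntsync_wait_args is an
 * absolute time in nanoseconds (U64_MAX means wait forever), interpreted
 * against CLOCK_MONOTONIC by default or CLOCK_REALTIME when
 * NTSYNC_WAIT_REALTIME is set, so a relative timeout must be converted first:
 *
 *	struct timespec now;
 *	__u64 abs_timeout;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &now);
 *	abs_timeout = (now.tv_sec + 1) * 1000000000ULL + now.tv_nsec;	// ~1s from now
 */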
762 
763 /*
764  * Allocate and initialize the ntsync_q structure, but do not queue us yet.
765  */
766 static int setup_wait(struct ntsync_device *dev,
767 		      const struct ntsync_wait_args *args, bool all,
768 		      struct ntsync_q **ret_q)
769 {
770 	const __u32 count = args->count;
771 	int fds[NTSYNC_MAX_WAIT_COUNT];
772 	struct ntsync_q *q;
773 	__u32 i, j;
774 
775 	if (args->pad[0] || args->pad[1] || (args->flags & ~NTSYNC_WAIT_REALTIME))
776 		return -EINVAL;
777 
778 	if (args->count > NTSYNC_MAX_WAIT_COUNT)
779 		return -EINVAL;
780 
781 	if (copy_from_user(fds, u64_to_user_ptr(args->objs),
782 			   array_size(count, sizeof(*fds))))
783 		return -EFAULT;
784 
785 	q = kmalloc(struct_size(q, entries, count), GFP_KERNEL);
786 	if (!q)
787 		return -ENOMEM;
788 	q->task = current;
789 	q->owner = args->owner;
790 	atomic_set(&q->signaled, -1);
791 	q->all = all;
792 	q->ownerdead = false;
793 	q->count = count;
794 
795 	for (i = 0; i < count; i++) {
796 		struct ntsync_q_entry *entry = &q->entries[i];
797 		struct ntsync_obj *obj = get_obj(dev, fds[i]);
798 
799 		if (!obj)
800 			goto err;
801 
802 		if (all) {
803 			/* Check that the objects are all distinct. */
804 			for (j = 0; j < i; j++) {
805 				if (obj == q->entries[j].obj) {
806 					put_obj(obj);
807 					goto err;
808 				}
809 			}
810 		}
811 
812 		entry->obj = obj;
813 		entry->q = q;
814 		entry->index = i;
815 	}
816 
817 	*ret_q = q;
818 	return 0;
819 
820 err:
821 	for (j = 0; j < i; j++)
822 		put_obj(q->entries[j].obj);
823 	kfree(q);
824 	return -EINVAL;
825 }
826 
827 static void try_wake_any_obj(struct ntsync_obj *obj)
828 {
829 	switch (obj->type) {
830 	case NTSYNC_TYPE_SEM:
831 		try_wake_any_sem(obj);
832 		break;
833 	case NTSYNC_TYPE_MUTEX:
834 		try_wake_any_mutex(obj);
835 		break;
836 	case NTSYNC_TYPE_EVENT:
837 		try_wake_any_event(obj);
838 		break;
839 	}
840 }
841 
842 static int ntsync_wait_any(struct ntsync_device *dev, void __user *argp)
843 {
844 	struct ntsync_wait_args args;
845 	struct ntsync_q *q;
846 	int signaled;
847 	bool all;
848 	__u32 i;
849 	int ret;
850 
851 	if (copy_from_user(&args, argp, sizeof(args)))
852 		return -EFAULT;
853 
854 	ret = setup_wait(dev, &args, false, &q);
855 	if (ret < 0)
856 		return ret;
857 
858 	/* queue ourselves */
859 
860 	for (i = 0; i < args.count; i++) {
861 		struct ntsync_q_entry *entry = &q->entries[i];
862 		struct ntsync_obj *obj = entry->obj;
863 
864 		all = ntsync_lock_obj(dev, obj);
865 		list_add_tail(&entry->node, &obj->any_waiters);
866 		ntsync_unlock_obj(dev, obj, all);
867 	}
868 
869 	/* check if we are already signaled */
870 
871 	for (i = 0; i < args.count; i++) {
872 		struct ntsync_obj *obj = q->entries[i].obj;
873 
874 		if (atomic_read(&q->signaled) != -1)
875 			break;
876 
877 		all = ntsync_lock_obj(dev, obj);
878 		try_wake_any_obj(obj);
879 		ntsync_unlock_obj(dev, obj, all);
880 	}
881 
882 	/* sleep */
883 
884 	ret = ntsync_schedule(q, &args);
885 
886 	/* and finally, unqueue */
887 
888 	for (i = 0; i < args.count; i++) {
889 		struct ntsync_q_entry *entry = &q->entries[i];
890 		struct ntsync_obj *obj = entry->obj;
891 
892 		all = ntsync_lock_obj(dev, obj);
893 		list_del(&entry->node);
894 		ntsync_unlock_obj(dev, obj, all);
895 
896 		put_obj(obj);
897 	}
898 
899 	signaled = atomic_read(&q->signaled);
900 	if (signaled != -1) {
901 		struct ntsync_wait_args __user *user_args = argp;
902 
903 		/* even if we caught a signal, we need to communicate success */
904 		ret = q->ownerdead ? -EOWNERDEAD : 0;
905 
906 		if (put_user(signaled, &user_args->index))
907 			ret = -EFAULT;
908 	} else if (!ret) {
909 		ret = -ETIMEDOUT;
910 	}
911 
912 	kfree(q);
913 	return ret;
914 }
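
/*
 * Illustrative userspace sketch of a wait-any call (assuming fds[] holds two
 * ntsync object fds, dev_fd is the open device, "tid" is this thread's owner
 * identifier, and abs_timeout was computed as in the earlier sketch near
 * ntsync_schedule()): on success, index reports which object satisfied the
 * wait; ioctl() fails with ETIMEDOUT if the timeout expires, and with
 * EOWNERDEAD if a mutex was acquired whose previous owner died (index is
 * still written in that case):
 *
 *	struct ntsync_wait_args wait = {
 *		.objs = (uintptr_t)fds,
 *		.count = 2,
 *		.owner = tid,
 *		.timeout = abs_timeout,
 *	};
 *
 *	if (ioctl(dev_fd, NTSYNC_IOC_WAIT_ANY, &wait) == 0)
 *		printf("object %u was signaled\n", wait.index);
 */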
915 
916 static int ntsync_wait_all(struct ntsync_device *dev, void __user *argp)
917 {
918 	struct ntsync_wait_args args;
919 	struct ntsync_q *q;
920 	int signaled;
921 	__u32 i;
922 	int ret;
923 
924 	if (copy_from_user(&args, argp, sizeof(args)))
925 		return -EFAULT;
926 
927 	ret = setup_wait(dev, &args, true, &q);
928 	if (ret < 0)
929 		return ret;
930 
931 	/* queue ourselves */
932 
933 	mutex_lock(&dev->wait_all_lock);
934 
935 	for (i = 0; i < args.count; i++) {
936 		struct ntsync_q_entry *entry = &q->entries[i];
937 		struct ntsync_obj *obj = entry->obj;
938 
939 		atomic_inc(&obj->all_hint);
940 
941 		/*
942 		 * obj->all_waiters is protected by dev->wait_all_lock rather
943 		 * than obj->lock, so there is no need to acquire obj->lock
944 		 * here.
945 		 */
946 		list_add_tail(&entry->node, &obj->all_waiters);
947 	}
948 
949 	/* check if we are already signaled */
950 
951 	try_wake_all(dev, q, NULL);
952 
953 	mutex_unlock(&dev->wait_all_lock);
954 
955 	/* sleep */
956 
957 	ret = ntsync_schedule(q, &args);
958 
959 	/* and finally, unqueue */
960 
961 	mutex_lock(&dev->wait_all_lock);
962 
963 	for (i = 0; i < args.count; i++) {
964 		struct ntsync_q_entry *entry = &q->entries[i];
965 		struct ntsync_obj *obj = entry->obj;
966 
967 		/*
968 		 * obj->all_waiters is protected by dev->wait_all_lock rather
969 		 * than obj->lock, so there is no need to acquire it here.
970 		 */
971 		list_del(&entry->node);
972 
973 		atomic_dec(&obj->all_hint);
974 
975 		put_obj(obj);
976 	}
977 
978 	mutex_unlock(&dev->wait_all_lock);
979 
980 	signaled = atomic_read(&q->signaled);
981 	if (signaled != -1) {
982 		struct ntsync_wait_args __user *user_args = argp;
983 
984 		/* even if we caught a signal, we need to communicate success */
985 		ret = q->ownerdead ? -EOWNERDEAD : 0;
986 
987 		if (put_user(signaled, &user_args->index))
988 			ret = -EFAULT;
989 	} else if (!ret) {
990 		ret = -ETIMEDOUT;
991 	}
992 
993 	kfree(q);
994 	return ret;
995 }
996 
997 static int ntsync_char_open(struct inode *inode, struct file *file)
998 {
999 	struct ntsync_device *dev;
1000 
1001 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1002 	if (!dev)
1003 		return -ENOMEM;
1004 
1005 	mutex_init(&dev->wait_all_lock);
1006 
1007 	file->private_data = dev;
1008 	dev->file = file;
1009 	return nonseekable_open(inode, file);
1010 }
1011 
1012 static int ntsync_char_release(struct inode *inode, struct file *file)
1013 {
1014 	struct ntsync_device *dev = file->private_data;
1015 
1016 	kfree(dev);
1017 
1018 	return 0;
1019 }
1020 
1021 static long ntsync_char_ioctl(struct file *file, unsigned int cmd,
1022 			      unsigned long parm)
1023 {
1024 	struct ntsync_device *dev = file->private_data;
1025 	void __user *argp = (void __user *)parm;
1026 
1027 	switch (cmd) {
1028 	case NTSYNC_IOC_CREATE_EVENT:
1029 		return ntsync_create_event(dev, argp);
1030 	case NTSYNC_IOC_CREATE_MUTEX:
1031 		return ntsync_create_mutex(dev, argp);
1032 	case NTSYNC_IOC_CREATE_SEM:
1033 		return ntsync_create_sem(dev, argp);
1034 	case NTSYNC_IOC_WAIT_ALL:
1035 		return ntsync_wait_all(dev, argp);
1036 	case NTSYNC_IOC_WAIT_ANY:
1037 		return ntsync_wait_any(dev, argp);
1038 	default:
1039 		return -ENOIOCTLCMD;
1040 	}
1041 }
1042 
1043 static const struct file_operations ntsync_fops = {
1044 	.owner		= THIS_MODULE,
1045 	.open		= ntsync_char_open,
1046 	.release	= ntsync_char_release,
1047 	.unlocked_ioctl	= ntsync_char_ioctl,
1048 	.compat_ioctl	= compat_ptr_ioctl,
1049 };
1050 
1051 static struct miscdevice ntsync_misc = {
1052 	.minor		= MISC_DYNAMIC_MINOR,
1053 	.name		= NTSYNC_NAME,
1054 	.fops		= &ntsync_fops,
1055 };
1056 
1057 module_misc_device(ntsync_misc);
1058 
1059 MODULE_AUTHOR("Elizabeth Figura <[email protected]>");
1060 MODULE_DESCRIPTION("Kernel driver for NT synchronization primitives");
1061 MODULE_LICENSE("GPL");
1062