// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Various unit tests for the "ntsync" synchronization primitive driver.
 *
 * Copyright (C) 2021-2022 Elizabeth Figura <[email protected]>
 */

#define _GNU_SOURCE
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <time.h>
#include <pthread.h>
#include <linux/ntsync.h>
#include "../../kselftest_harness.h"
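
/*
 * Thin wrappers around the ntsync ioctls. Each read_*_state() helper
 * poisons the argument struct with 0xcc first so stale fields are
 * noticed, and the check_*_state() macros compare the result against
 * the expected values with EXPECT_EQ().
 */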
static int read_sem_state(int sem, __u32 *count, __u32 *max)
{
	struct ntsync_sem_args args;
	int ret;

	memset(&args, 0xcc, sizeof(args));
	ret = ioctl(sem, NTSYNC_IOC_SEM_READ, &args);
	*count = args.count;
	*max = args.max;
	return ret;
}

#define check_sem_state(sem, count, max) \
	({ \
		__u32 __count, __max; \
		int ret = read_sem_state((sem), &__count, &__max); \
		EXPECT_EQ(0, ret); \
		EXPECT_EQ((count), __count); \
		EXPECT_EQ((max), __max); \
	})

static int release_sem(int sem, __u32 *count)
{
	return ioctl(sem, NTSYNC_IOC_SEM_RELEASE, count);
}

static int read_mutex_state(int mutex, __u32 *count, __u32 *owner)
{
	struct ntsync_mutex_args args;
	int ret;

	memset(&args, 0xcc, sizeof(args));
	ret = ioctl(mutex, NTSYNC_IOC_MUTEX_READ, &args);
	*count = args.count;
	*owner = args.owner;
	return ret;
}

#define check_mutex_state(mutex, count, owner) \
	({ \
		__u32 __count, __owner; \
		int ret = read_mutex_state((mutex), &__count, &__owner); \
		EXPECT_EQ(0, ret); \
		EXPECT_EQ((count), __count); \
		EXPECT_EQ((owner), __owner); \
	})

static int unlock_mutex(int mutex, __u32 owner, __u32 *count)
{
	struct ntsync_mutex_args args;
	int ret;

	args.owner = owner;
	args.count = 0xdeadbeef;
	ret = ioctl(mutex, NTSYNC_IOC_MUTEX_UNLOCK, &args);
	*count = args.count;
	return ret;
}

static int read_event_state(int event, __u32 *signaled, __u32 *manual)
{
	struct ntsync_event_args args;
	int ret;

	memset(&args, 0xcc, sizeof(args));
	ret = ioctl(event, NTSYNC_IOC_EVENT_READ, &args);
	*signaled = args.signaled;
	*manual = args.manual;
	return ret;
}

#define check_event_state(event, signaled, manual) \
	({ \
		__u32 __signaled, __manual; \
		int ret = read_event_state((event), &__signaled, &__manual); \
		EXPECT_EQ(0, ret); \
		EXPECT_EQ((signaled), __signaled); \
		EXPECT_EQ((manual), __manual); \
	})

static int wait_objs(int fd, unsigned long request, __u32 count,
		     const int *objs, __u32 owner, __u32 *index)
{
	struct ntsync_wait_args args = {0};
	struct timespec timeout;
	int ret;

	clock_gettime(CLOCK_MONOTONIC, &timeout);

	args.timeout = timeout.tv_sec * 1000000000 + timeout.tv_nsec;
	args.count = count;
	args.objs = (uintptr_t)objs;
	args.owner = owner;
	args.index = 0xdeadbeef;
	ret = ioctl(fd, request, &args);
	*index = args.index;
	return ret;
}

static int wait_any(int fd, __u32 count, const int *objs, __u32 owner, __u32 *index)
{
	return wait_objs(fd, NTSYNC_IOC_WAIT_ANY, count, objs, owner, index);
}

static int wait_all(int fd, __u32 count, const int *objs, __u32 owner, __u32 *index)
{
	return wait_objs(fd, NTSYNC_IOC_WAIT_ALL, count, objs, owner, index);
}
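
/*
 * Exercise semaphore creation, NTSYNC_IOC_SEM_RELEASE and single-object
 * waits: invalid initial counts, releases past the maximum (EOVERFLOW),
 * and timeouts once the count reaches zero.
 */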
TEST(semaphore_state)
{
	struct ntsync_sem_args sem_args;
	struct timespec timeout;
	__u32 count, index;
	int fd, ret, sem;

	clock_gettime(CLOCK_MONOTONIC, &timeout);

	fd = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
	ASSERT_LE(0, fd);

	sem_args.count = 3;
	sem_args.max = 2;
	sem = ioctl(fd, NTSYNC_IOC_CREATE_SEM, &sem_args);
	EXPECT_EQ(-1, sem);
	EXPECT_EQ(EINVAL, errno);

	sem_args.count = 2;
	sem_args.max = 2;
	sem = ioctl(fd, NTSYNC_IOC_CREATE_SEM, &sem_args);
	EXPECT_LE(0, sem);
	check_sem_state(sem, 2, 2);

	count = 0;
	ret = release_sem(sem, &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(2, count);
	check_sem_state(sem, 2, 2);

	count = 1;
	ret = release_sem(sem, &count);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EOVERFLOW, errno);
	check_sem_state(sem, 2, 2);

	ret = wait_any(fd, 1, &sem, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_sem_state(sem, 1, 2);

	ret = wait_any(fd, 1, &sem, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_sem_state(sem, 0, 2);

	ret = wait_any(fd, 1, &sem, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);

	count = 3;
	ret = release_sem(sem, &count);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EOVERFLOW, errno);
	check_sem_state(sem, 0, 2);

	count = 2;
	ret = release_sem(sem, &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, count);
	check_sem_state(sem, 2, 2);

	ret = wait_any(fd, 1, &sem, 123, &index);
	EXPECT_EQ(0, ret);
	ret = wait_any(fd, 1, &sem, 123, &index);
	EXPECT_EQ(0, ret);

	count = 1;
	ret = release_sem(sem, &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, count);
	check_sem_state(sem, 1, 2);

	count = ~0u;
	ret = release_sem(sem, &count);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EOVERFLOW, errno);
	check_sem_state(sem, 1, 2);

	close(sem);

	close(fd);
}
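
/*
 * Exercise recursive mutex semantics: invalid create arguments, unlocking
 * by the wrong owner (EPERM), recursion counts, owner-kill handling
 * (EOWNERDEAD), and waits that time out while another owner holds it.
 */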
TEST(mutex_state)
{
	struct ntsync_mutex_args mutex_args;
	__u32 owner, count, index;
	struct timespec timeout;
	int fd, ret, mutex;

	clock_gettime(CLOCK_MONOTONIC, &timeout);

	fd = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
	ASSERT_LE(0, fd);

	mutex_args.owner = 123;
	mutex_args.count = 0;
	mutex = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
	EXPECT_EQ(-1, mutex);
	EXPECT_EQ(EINVAL, errno);

	mutex_args.owner = 0;
	mutex_args.count = 2;
	mutex = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
	EXPECT_EQ(-1, mutex);
	EXPECT_EQ(EINVAL, errno);

	mutex_args.owner = 123;
	mutex_args.count = 2;
	mutex = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
	EXPECT_LE(0, mutex);
	check_mutex_state(mutex, 2, 123);

	ret = unlock_mutex(mutex, 0, &count);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);

	ret = unlock_mutex(mutex, 456, &count);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EPERM, errno);
	check_mutex_state(mutex, 2, 123);

	ret = unlock_mutex(mutex, 123, &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(2, count);
	check_mutex_state(mutex, 1, 123);

	ret = unlock_mutex(mutex, 123, &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, count);
	check_mutex_state(mutex, 0, 0);

	ret = unlock_mutex(mutex, 123, &count);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EPERM, errno);

	ret = wait_any(fd, 1, &mutex, 456, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_mutex_state(mutex, 1, 456);

	ret = wait_any(fd, 1, &mutex, 456, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_mutex_state(mutex, 2, 456);

	ret = unlock_mutex(mutex, 456, &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(2, count);
	check_mutex_state(mutex, 1, 456);

	ret = wait_any(fd, 1, &mutex, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);

	owner = 0;
	ret = ioctl(mutex, NTSYNC_IOC_MUTEX_KILL, &owner);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);

	owner = 123;
	ret = ioctl(mutex, NTSYNC_IOC_MUTEX_KILL, &owner);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EPERM, errno);
	check_mutex_state(mutex, 1, 456);

	owner = 456;
	ret = ioctl(mutex, NTSYNC_IOC_MUTEX_KILL, &owner);
	EXPECT_EQ(0, ret);

	memset(&mutex_args, 0xcc, sizeof(mutex_args));
	ret = ioctl(mutex, NTSYNC_IOC_MUTEX_READ, &mutex_args);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EOWNERDEAD, errno);
	EXPECT_EQ(0, mutex_args.count);
	EXPECT_EQ(0, mutex_args.owner);

	memset(&mutex_args, 0xcc, sizeof(mutex_args));
	ret = ioctl(mutex, NTSYNC_IOC_MUTEX_READ, &mutex_args);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EOWNERDEAD, errno);
	EXPECT_EQ(0, mutex_args.count);
	EXPECT_EQ(0, mutex_args.owner);

	ret = wait_any(fd, 1, &mutex, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EOWNERDEAD, errno);
	EXPECT_EQ(0, index);
	check_mutex_state(mutex, 1, 123);

	owner = 123;
	ret = ioctl(mutex, NTSYNC_IOC_MUTEX_KILL, &owner);
	EXPECT_EQ(0, ret);

	memset(&mutex_args, 0xcc, sizeof(mutex_args));
	ret = ioctl(mutex, NTSYNC_IOC_MUTEX_READ, &mutex_args);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EOWNERDEAD, errno);
	EXPECT_EQ(0, mutex_args.count);
	EXPECT_EQ(0, mutex_args.owner);

	ret = wait_any(fd, 1, &mutex, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EOWNERDEAD, errno);
	EXPECT_EQ(0, index);
	check_mutex_state(mutex, 1, 123);

	close(mutex);

	mutex_args.owner = 0;
	mutex_args.count = 0;
	mutex = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
	EXPECT_LE(0, mutex);
	check_mutex_state(mutex, 0, 0);

	ret = wait_any(fd, 1, &mutex, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_mutex_state(mutex, 1, 123);

	close(mutex);

	mutex_args.owner = 123;
	mutex_args.count = ~0u;
	mutex = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
	EXPECT_LE(0, mutex);
	check_mutex_state(mutex, ~0u, 123);

	ret = wait_any(fd, 1, &mutex, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);

	close(mutex);

	close(fd);
}
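
/*
 * Manual-reset events stay signaled until explicitly reset: a successful
 * wait must not clear them, and a pulse wakes waiters but leaves the
 * event unsignaled afterwards.
 */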
TEST(manual_event_state)
{
	struct ntsync_event_args event_args;
	__u32 index, signaled;
	int fd, event, ret;

	fd = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
	ASSERT_LE(0, fd);

	event_args.manual = 1;
	event_args.signaled = 0;
	event = ioctl(fd, NTSYNC_IOC_CREATE_EVENT, &event_args);
	EXPECT_LE(0, event);
	check_event_state(event, 0, 1);

	signaled = 0xdeadbeef;
	ret = ioctl(event, NTSYNC_IOC_EVENT_SET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, signaled);
	check_event_state(event, 1, 1);

	ret = ioctl(event, NTSYNC_IOC_EVENT_SET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, signaled);
	check_event_state(event, 1, 1);

	ret = wait_any(fd, 1, &event, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_event_state(event, 1, 1);

	signaled = 0xdeadbeef;
	ret = ioctl(event, NTSYNC_IOC_EVENT_RESET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, signaled);
	check_event_state(event, 0, 1);

	ret = ioctl(event, NTSYNC_IOC_EVENT_RESET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, signaled);
	check_event_state(event, 0, 1);

	ret = wait_any(fd, 1, &event, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);

	ret = ioctl(event, NTSYNC_IOC_EVENT_SET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, signaled);

	ret = ioctl(event, NTSYNC_IOC_EVENT_PULSE, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, signaled);
	check_event_state(event, 0, 1);

	ret = ioctl(event, NTSYNC_IOC_EVENT_PULSE, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, signaled);
	check_event_state(event, 0, 1);

	close(event);

	close(fd);
}
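
/*
 * Auto-reset events are consumed by a successful wait: after wait_any()
 * succeeds the event reads back as unsignaled.
 */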
TEST(auto_event_state)
{
	struct ntsync_event_args event_args;
	__u32 index, signaled;
	int fd, event, ret;

	fd = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
	ASSERT_LE(0, fd);

	event_args.manual = 0;
	event_args.signaled = 1;
	event = ioctl(fd, NTSYNC_IOC_CREATE_EVENT, &event_args);
	EXPECT_LE(0, event);

	check_event_state(event, 1, 0);

	signaled = 0xdeadbeef;
	ret = ioctl(event, NTSYNC_IOC_EVENT_SET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, signaled);
	check_event_state(event, 1, 0);

	ret = wait_any(fd, 1, &event, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_event_state(event, 0, 0);

	signaled = 0xdeadbeef;
	ret = ioctl(event, NTSYNC_IOC_EVENT_RESET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, signaled);
	check_event_state(event, 0, 0);

	ret = wait_any(fd, 1, &event, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);

	ret = ioctl(event, NTSYNC_IOC_EVENT_SET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, signaled);

	ret = ioctl(event, NTSYNC_IOC_EVENT_PULSE, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, signaled);
	check_event_state(event, 0, 0);

	ret = ioctl(event, NTSYNC_IOC_EVENT_PULSE, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, signaled);
	check_event_state(event, 0, 0);

	close(event);

	close(fd);
}
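
/*
 * NTSYNC_IOC_WAIT_ANY: a successful wait consumes exactly one object and
 * reports its index, duplicate objects in the array are allowed, and
 * counts above NTSYNC_MAX_WAIT_COUNT are rejected with EINVAL.
 */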
TEST(test_wait_any)
{
	int objs[NTSYNC_MAX_WAIT_COUNT + 1], fd, ret;
	struct ntsync_mutex_args mutex_args = {0};
	struct ntsync_sem_args sem_args = {0};
	__u32 owner, index, count, i;
	struct timespec timeout;

	clock_gettime(CLOCK_MONOTONIC, &timeout);

	fd = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
	ASSERT_LE(0, fd);

	sem_args.count = 2;
	sem_args.max = 3;
	objs[0] = ioctl(fd, NTSYNC_IOC_CREATE_SEM, &sem_args);
	EXPECT_LE(0, objs[0]);

	mutex_args.owner = 0;
	mutex_args.count = 0;
	objs[1] = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
	EXPECT_LE(0, objs[1]);

	ret = wait_any(fd, 2, objs, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_sem_state(objs[0], 1, 3);
	check_mutex_state(objs[1], 0, 0);

	ret = wait_any(fd, 2, objs, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_sem_state(objs[0], 0, 3);
	check_mutex_state(objs[1], 0, 0);

	ret = wait_any(fd, 2, objs, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, index);
	check_sem_state(objs[0], 0, 3);
	check_mutex_state(objs[1], 1, 123);

	count = 1;
	ret = release_sem(objs[0], &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, count);

	ret = wait_any(fd, 2, objs, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_sem_state(objs[0], 0, 3);
	check_mutex_state(objs[1], 1, 123);

	ret = wait_any(fd, 2, objs, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, index);
	check_sem_state(objs[0], 0, 3);
	check_mutex_state(objs[1], 2, 123);

	ret = wait_any(fd, 2, objs, 456, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);

	owner = 123;
	ret = ioctl(objs[1], NTSYNC_IOC_MUTEX_KILL, &owner);
	EXPECT_EQ(0, ret);

	ret = wait_any(fd, 2, objs, 456, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EOWNERDEAD, errno);
	EXPECT_EQ(1, index);

	ret = wait_any(fd, 2, objs, 456, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, index);

	close(objs[1]);

	/* test waiting on the same object twice */

	count = 2;
	ret = release_sem(objs[0], &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, count);

	objs[1] = objs[0];
	ret = wait_any(fd, 2, objs, 456, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_sem_state(objs[0], 1, 3);

	ret = wait_any(fd, 0, NULL, 456, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);

	for (i = 1; i < NTSYNC_MAX_WAIT_COUNT + 1; ++i)
		objs[i] = objs[0];

	ret = wait_any(fd, NTSYNC_MAX_WAIT_COUNT, objs, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);

	ret = wait_any(fd, NTSYNC_MAX_WAIT_COUNT + 1, objs, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);

	ret = wait_any(fd, -1, objs, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);

	close(objs[0]);

	close(fd);
}
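
/*
 * NTSYNC_IOC_WAIT_ALL: either every object is acquired atomically or none
 * is, and passing the same object twice is rejected with EINVAL.
 */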
TEST(test_wait_all)
{
	struct ntsync_mutex_args mutex_args = {0};
	struct ntsync_sem_args sem_args = {0};
	__u32 owner, index, count;
	int objs[2], fd, ret;

	fd = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
	ASSERT_LE(0, fd);

	sem_args.count = 2;
	sem_args.max = 3;
	objs[0] = ioctl(fd, NTSYNC_IOC_CREATE_SEM, &sem_args);
	EXPECT_LE(0, objs[0]);

	mutex_args.owner = 0;
	mutex_args.count = 0;
	objs[1] = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
	EXPECT_LE(0, objs[1]);

	ret = wait_all(fd, 2, objs, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_sem_state(objs[0], 1, 3);
	check_mutex_state(objs[1], 1, 123);

	ret = wait_all(fd, 2, objs, 456, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);
	check_sem_state(objs[0], 1, 3);
	check_mutex_state(objs[1], 1, 123);

	ret = wait_all(fd, 2, objs, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_sem_state(objs[0], 0, 3);
	check_mutex_state(objs[1], 2, 123);

	ret = wait_all(fd, 2, objs, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);
	check_sem_state(objs[0], 0, 3);
	check_mutex_state(objs[1], 2, 123);

	count = 3;
	ret = release_sem(objs[0], &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, count);

	ret = wait_all(fd, 2, objs, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_sem_state(objs[0], 2, 3);
	check_mutex_state(objs[1], 3, 123);

	owner = 123;
	ret = ioctl(objs[1], NTSYNC_IOC_MUTEX_KILL, &owner);
	EXPECT_EQ(0, ret);

	ret = wait_all(fd, 2, objs, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EOWNERDEAD, errno);
	check_sem_state(objs[0], 1, 3);
	check_mutex_state(objs[1], 1, 123);

	close(objs[1]);

	/* test waiting on the same object twice */
	objs[1] = objs[0];
	ret = wait_all(fd, 2, objs, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);

	close(objs[0]);

	close(fd);
}
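
/*
 * Helpers for the wake tests below: wait_thread() issues a blocking wait
 * ioctl from a second thread, get_abs_timeout() converts "now plus ms"
 * into the absolute CLOCK_MONOTONIC deadline the driver expects, and
 * wait_for_thread() joins the waiter with a timeout so a stuck wait fails
 * the test instead of hanging it.
 */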
struct wake_args {
	int fd;
	int obj;
};

struct wait_args {
	int fd;
	unsigned long request;
	struct ntsync_wait_args *args;
	int ret;
	int err;
};

static void *wait_thread(void *arg)
{
	struct wait_args *args = arg;

	args->ret = ioctl(args->fd, args->request, args->args);
	args->err = errno;
	return NULL;
}

static __u64 get_abs_timeout(unsigned int ms)
{
	struct timespec timeout;
	clock_gettime(CLOCK_MONOTONIC, &timeout);
	return (timeout.tv_sec * 1000000000) + timeout.tv_nsec + (ms * 1000000);
}

static int wait_for_thread(pthread_t thread, unsigned int ms)
{
	struct timespec timeout;

	clock_gettime(CLOCK_REALTIME, &timeout);
	timeout.tv_nsec += ms * 1000000;
	timeout.tv_sec += (timeout.tv_nsec / 1000000000);
	timeout.tv_nsec %= 1000000000;
	return pthread_timedjoin_np(thread, NULL, &timeout);
}
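
/*
 * Wake a blocked NTSYNC_IOC_WAIT_ANY from another thread by posting the
 * semaphore or unlocking the mutex, and check that closing the objects
 * mid-wait simply lets the wait time out.
 */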
TEST(wake_any)
{
	struct ntsync_mutex_args mutex_args = {0};
	struct ntsync_wait_args wait_args = {0};
	struct ntsync_sem_args sem_args = {0};
	struct wait_args thread_args;
	int objs[2], fd, ret;
	__u32 count, index;
	pthread_t thread;

	fd = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
	ASSERT_LE(0, fd);

	sem_args.count = 0;
	sem_args.max = 3;
	objs[0] = ioctl(fd, NTSYNC_IOC_CREATE_SEM, &sem_args);
	EXPECT_LE(0, objs[0]);

	mutex_args.owner = 123;
	mutex_args.count = 1;
	objs[1] = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
	EXPECT_LE(0, objs[1]);

	/* test waking the semaphore */

	wait_args.timeout = get_abs_timeout(1000);
	wait_args.objs = (uintptr_t)objs;
	wait_args.count = 2;
	wait_args.owner = 456;
	wait_args.index = 0xdeadbeef;
	thread_args.fd = fd;
	thread_args.args = &wait_args;
	thread_args.request = NTSYNC_IOC_WAIT_ANY;
	ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
	EXPECT_EQ(0, ret);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(ETIMEDOUT, ret);

	count = 1;
	ret = release_sem(objs[0], &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, count);
	check_sem_state(objs[0], 0, 3);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, thread_args.ret);
	EXPECT_EQ(0, wait_args.index);

	/* test waking the mutex */

	/* first grab it again for owner 123 */
	ret = wait_any(fd, 1, &objs[1], 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);

	wait_args.timeout = get_abs_timeout(1000);
	wait_args.owner = 456;
	ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
	EXPECT_EQ(0, ret);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(ETIMEDOUT, ret);

	ret = unlock_mutex(objs[1], 123, &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(2, count);

	ret = pthread_tryjoin_np(thread, NULL);
	EXPECT_EQ(EBUSY, ret);

	ret = unlock_mutex(objs[1], 123, &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, count);
	check_mutex_state(objs[1], 1, 456);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, thread_args.ret);
	EXPECT_EQ(1, wait_args.index);

	/* delete an object while it's being waited on */

	wait_args.timeout = get_abs_timeout(200);
	wait_args.owner = 123;
	ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
	EXPECT_EQ(0, ret);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(ETIMEDOUT, ret);

	close(objs[0]);
	close(objs[1]);

	ret = wait_for_thread(thread, 200);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(-1, thread_args.ret);
	EXPECT_EQ(ETIMEDOUT, thread_args.err);

	close(fd);
}
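
/*
 * Wake a blocked NTSYNC_IOC_WAIT_ALL only once every object is
 * simultaneously available, then check the close-during-wait case again.
 */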
TEST(wake_all)
{
	struct ntsync_mutex_args mutex_args = {0};
	struct ntsync_wait_args wait_args = {0};
	struct ntsync_sem_args sem_args = {0};
	struct wait_args thread_args;
	int objs[2], fd, ret;
	__u32 count, index;
	pthread_t thread;

	fd = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
	ASSERT_LE(0, fd);

	sem_args.count = 0;
	sem_args.max = 3;
	objs[0] = ioctl(fd, NTSYNC_IOC_CREATE_SEM, &sem_args);
	EXPECT_LE(0, objs[0]);

	mutex_args.owner = 123;
	mutex_args.count = 1;
	objs[1] = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
	EXPECT_LE(0, objs[1]);

	wait_args.timeout = get_abs_timeout(1000);
	wait_args.objs = (uintptr_t)objs;
	wait_args.count = 2;
	wait_args.owner = 456;
	thread_args.fd = fd;
	thread_args.args = &wait_args;
	thread_args.request = NTSYNC_IOC_WAIT_ALL;
	ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
	EXPECT_EQ(0, ret);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(ETIMEDOUT, ret);

	count = 1;
	ret = release_sem(objs[0], &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, count);

	ret = pthread_tryjoin_np(thread, NULL);
	EXPECT_EQ(EBUSY, ret);

	check_sem_state(objs[0], 1, 3);

	ret = wait_any(fd, 1, &objs[0], 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);

	ret = unlock_mutex(objs[1], 123, &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, count);

	ret = pthread_tryjoin_np(thread, NULL);
	EXPECT_EQ(EBUSY, ret);

	check_mutex_state(objs[1], 0, 0);

	count = 2;
	ret = release_sem(objs[0], &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, count);
	check_sem_state(objs[0], 1, 3);
	check_mutex_state(objs[1], 1, 456);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, thread_args.ret);

	/* delete an object while it's being waited on */

	wait_args.timeout = get_abs_timeout(200);
	wait_args.owner = 123;
	ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
	EXPECT_EQ(0, ret);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(ETIMEDOUT, ret);

	close(objs[0]);
	close(objs[1]);

	ret = wait_for_thread(thread, 200);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(-1, thread_args.ret);
	EXPECT_EQ(ETIMEDOUT, thread_args.err);

	close(fd);
}

TEST_HARNESS_MAIN