#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H


#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void *private;
	wait_queue_func_t func;
	struct list_head task_list;
};

struct wait_bit_key {
	void *flags;
	int bit_nr;
};

struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};

struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->private = p;
	q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
					wait_queue_func_t func)
{
	q->flags = 0;
	q->private = NULL;
	q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
					      wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
						   wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
				       wait_queue_t *old)
{
	list_del(&old->task_list);
}
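
/*
 * Illustrative sketch (not part of this header): the classic open-coded
 * sleep built from the low-level helpers above.  "foo_wq" and "foo_ready"
 * are hypothetical; most users should prefer the wait_event*() macros
 * defined further down.
 *
 *	DECLARE_WAITQUEUE(wait, current);
 *
 *	add_wait_queue(&foo_wq, &wait);
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (foo_ready)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&foo_wq, &wait);
 */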

void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
			void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m)					\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)				\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)			\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)			\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

#define __wait_event(wq, condition)					\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		schedule();						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)
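
/*
 * Illustrative sketch (not part of this header): a minimal pairing of
 * wait_event() with wake_up().  "foo_wq" and "foo_ready" are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(foo_wq);
 *	static int foo_ready;
 *
 *	waiter:
 *		wait_event(foo_wq, foo_ready);
 *
 *	waker:
 *		foo_ready = 1;
 *		wake_up(&foo_wq);
 */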

#define __wait_event_timeout(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		ret = schedule_timeout(ret);				\
		if (!ret)						\
			break;						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, and the remaining
 * jiffies if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_timeout(wq, condition, __ret);		\
	__ret;								\
})

#define __wait_event_interruptible(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible(wq, condition, __ret);	\
	__ret;								\
})

#define __wait_event_interruptible_timeout(wq, condition, ret)		\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			ret = schedule_timeout(ret);			\
			if (!ret)					\
				break;					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies if the
 * condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret;								\
})
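
/*
 * Illustrative sketch (not part of this header): a signal-aware wait with a
 * timeout, e.g. in a driver read path.  "foo_wq", "foo_ready" and
 * FOO_TIMEOUT_MS are hypothetical.
 *
 *	long ret;
 *
 *	ret = wait_event_interruptible_timeout(foo_wq, foo_ready,
 *					msecs_to_jiffies(FOO_TIMEOUT_MS));
 *	if (ret == 0)
 *		return -ETIMEDOUT;	(the timeout elapsed)
 *	if (ret < 0)
 *		return ret;		(-ERESTARTSYS, interrupted by a signal)
 *	(otherwise ret is the number of jiffies that were left when the
 *	 condition became true)
 */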

#define __wait_event_interruptible_exclusive(wq, condition, ret)	\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait_exclusive(&wq, &__wait,			\
					TASK_INTERRUPTIBLE);		\
		if (condition) {					\
			finish_wait(&wq, &__wait);			\
			break;						\
		}							\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		abort_exclusive_wait(&wq, &__wait,			\
				TASK_INTERRUPTIBLE, NULL);		\
		break;							\
	}								\
} while (0)

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible_exclusive(wq, condition, __ret);\
	__ret;								\
})


#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})


/**
 * wait_event_interruptible_locked - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
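
/*
 * Illustrative sketch (not part of this header): pairing
 * wait_event_interruptible_locked() with wake_up_locked(), with wq.lock
 * protecting the condition.  "foo_wq" and "foo_ready" are hypothetical.
 *
 *	waiter:
 *		spin_lock(&foo_wq.lock);
 *		err = wait_event_interruptible_locked(foo_wq, foo_ready);
 *		spin_unlock(&foo_wq.lock);
 *
 *	waker:
 *		spin_lock(&foo_wq.lock);
 *		foo_ready = 1;
 *		wake_up_locked(&foo_wq);
 *		spin_unlock(&foo_wq.lock);
 */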

/**
 * wait_event_interruptible_locked_irq - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if this process is awakened while other processes are waiting
 * on the list, the remaining processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if this process is awakened while other processes are waiting
 * on the list, the remaining processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))



#define __wait_event_killable(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_KILLABLE);		\
		if (condition)						\
			break;						\
		if (!fatal_signal_pending(current)) {			\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_killable - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_killable(wq, condition, __ret);		\
	__ret;								\
})
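
/*
 * Illustrative sketch (not part of this header): wait_event_killable() for
 * waits that should ignore ordinary signals but still let a fatal signal
 * (e.g. SIGKILL) break the wait.  "foo_wq" and "foo_done" are hypothetical.
 *
 *	err = wait_event_killable(foo_wq, foo_done);
 *	if (err)
 *		return err;	(-ERESTARTSYS, a fatal signal was received)
 */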

/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy.  DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
				      signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
					   signed long timeout);

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * Use wait_on_bit() where you are waiting for the bit to clear but
 * have no intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}
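
/*
 * Illustrative sketch (not part of this header): waiting on a bit in a flags
 * word and waking the waiters when it is cleared.  "foo->flags", FOO_BUSY
 * and foo_bit_wait() are hypothetical; @action is typically a small helper
 * that just calls schedule() and returns 0.  The barrier on the waker side
 * reflects the wake_up_bit() requirement that the bit-clearing store be
 * visible before the wakeup.
 *
 *	waiter:
 *		err = wait_on_bit(&foo->flags, FOO_BUSY, foo_bit_wait,
 *				  TASK_UNINTERRUPTIBLE);
 *
 *	waker:
 *		clear_bit(FOO_BUSY, &foo->flags);
 *		smp_mb__after_clear_bit();
 *		wake_up_bit(&foo->flags, FOO_BUSY);
 */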

/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set a bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * Use wait_on_bit_lock() where you are waiting for the bit to clear
 * with the intention of setting it, and of clearing it again when done.
 */
static inline int wait_on_bit_lock(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

#endif