xref: /linux-6.15/include/linux/call_once.h (revision 916b7f42)
1931656b9SKeith Busch #ifndef _LINUX_CALL_ONCE_H
2931656b9SKeith Busch #define _LINUX_CALL_ONCE_H
3931656b9SKeith Busch 
4931656b9SKeith Busch #include <linux/types.h>
5931656b9SKeith Busch #include <linux/mutex.h>
6931656b9SKeith Busch 
/* Values for once::state, tracking progress of the one-time call. */
#define ONCE_NOT_STARTED 0	/* cb not yet run, or it failed and may be retried */
#define ONCE_RUNNING     1	/* cb currently executing under once->lock */
#define ONCE_COMPLETED   2	/* cb returned >= 0; subsequent calls are no-ops */

/*
 * One-time-call tracking object; initialize with once_init() before use.
 */
struct once {
        atomic_t state;		/* one of the ONCE_* values above */
        struct mutex lock;	/* serializes callers racing to run cb */
};
15931656b9SKeith Busch 
__once_init(struct once * once,const char * name,struct lock_class_key * key)16931656b9SKeith Busch static inline void __once_init(struct once *once, const char *name,
17931656b9SKeith Busch 			       struct lock_class_key *key)
18931656b9SKeith Busch {
19931656b9SKeith Busch         atomic_set(&once->state, ONCE_NOT_STARTED);
20931656b9SKeith Busch         __mutex_init(&once->lock, name, key);
21931656b9SKeith Busch }
22931656b9SKeith Busch 
/*
 * once_init - initialize a struct once for use with call_once()
 *
 * Wraps __once_init() so that each call site gets its own static
 * lock_class_key (named after the @once expression via stringification),
 * keeping lockdep classes distinct per user.
 */
#define once_init(once)							\
do {									\
	static struct lock_class_key __key;				\
	__once_init((once), #once, &__key);				\
} while (0)
28931656b9SKeith Busch 
/*
 * call_once - Ensure a function has been called exactly once
 *
 * @once: Tracking struct
 * @cb: Function to be called
 *
 * If @once has never completed successfully before, call @cb and, if
 * it returns a zero or positive value, mark @once as completed.  Return
 * the value returned by @cb.
 *
 * If @once has completed successfully before, return 0.
 *
 * The call to @cb is implicitly surrounded by a mutex, though for
 * efficiency the function avoids taking it after the first call.
 */
static inline int call_once(struct once *once, int (*cb)(struct once *))
{
	int r, state;

	/*
	 * Lockless fast path: once the call has completed, skip the mutex
	 * entirely.  Pairs with atomic_set_release() below, so everything
	 * cb wrote before completion is visible here.
	 */
	if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
		return 0;

	/* Slow path: serialize racing callers; released on any return. */
	guard(mutex)(&once->lock);
	/* Plain read is enough here: the mutex orders against other callers. */
	state = atomic_read(&once->state);
	if (unlikely(state != ONCE_NOT_STARTED))
		/*
		 * ONCE_COMPLETED: someone finished while we waited -> 0.
		 * Anything else (e.g. ONCE_RUNNING observed while holding
		 * the lock) should be impossible -- WARN and fail.
		 */
		return WARN_ON_ONCE(state != ONCE_COMPLETED) ? -EINVAL : 0;

	atomic_set(&once->state, ONCE_RUNNING);
	r = cb(once);
	if (r < 0)
		/* Negative return: roll back so a later caller may retry. */
		atomic_set(&once->state, ONCE_NOT_STARTED);
	else
		/* Publish completion; pairs with the acquire read above. */
		atomic_set_release(&once->state, ONCE_COMPLETED);
	return r;
}
65931656b9SKeith Busch 
66931656b9SKeith Busch #endif /* _LINUX_CALL_ONCE_H */
67