xref: /linux-6.15/include/linux/percpu-rwsem.h (revision 6fa79bca)
1 #ifndef _LINUX_PERCPU_RWSEM_H
2 #define _LINUX_PERCPU_RWSEM_H
3 
4 #include <linux/mutex.h>
5 #include <linux/percpu.h>
6 #include <linux/rcupdate.h>
7 #include <linux/delay.h>
8 
/*
 * Per-CPU reader/writer semaphore: readers take a cheap per-CPU
 * counter (no shared cacheline in the fast path); the writer sets
 * ->locked, waits for the counters to drain, and uses ->mtx to
 * exclude other writers and to queue slow-path readers.
 */
struct percpu_rw_semaphore {
	unsigned __percpu *counters;	/* per-CPU count of active readers */
	bool locked;			/* true while a writer holds or is acquiring the lock */
	struct mutex mtx;		/* writer mutex; also taken by slow-path readers */
};
14 
/*
 * Asymmetric barrier pair: the reader side only needs a compiler
 * barrier because the writer side uses synchronize_sched(), which
 * waits for every CPU to pass a quiescent state and therefore acts
 * as a full memory barrier on each of them.
 */
#define light_mb()	barrier()
#define heavy_mb()	synchronize_sched()
17 
/*
 * Acquire @p for reading.
 *
 * Fast path: no writer pending (->locked false), so bump this CPU's
 * counter while inside rcu_read_lock_sched().  The writer's
 * synchronize_sched() cannot complete until we leave this section,
 * so the writer is guaranteed to observe our increment when it sums
 * the counters.
 *
 * Slow path: a writer is active or pending; queue on ->mtx and take
 * the counter only after the writer releases it.
 */
static inline void percpu_down_read(struct percpu_rw_semaphore *p)
{
	rcu_read_lock_sched();
	if (unlikely(p->locked)) {
		rcu_read_unlock_sched();
		mutex_lock(&p->mtx);	/* blocks until the writer is done */
		this_cpu_inc(*p->counters);
		mutex_unlock(&p->mtx);
		return;
	}
	this_cpu_inc(*p->counters);
	rcu_read_unlock_sched();
	light_mb(); /* A, between read of p->locked and read of data, paired with D */
}
32 
/*
 * Release a read lock taken by percpu_down_read().  Only this CPU's
 * counter is decremented; the writer checks the sum over all CPUs
 * (__percpu_count()), so the per-CPU value itself need not balance.
 */
static inline void percpu_up_read(struct percpu_rw_semaphore *p)
{
	light_mb(); /* B, between read of the data and write to p->counter, paired with C */
	this_cpu_dec(*p->counters);
}
38 
39 static inline unsigned __percpu_count(unsigned __percpu *counters)
40 {
41 	unsigned total = 0;
42 	int cpu;
43 
44 	for_each_possible_cpu(cpu)
45 		total += ACCESS_ONCE(*per_cpu_ptr(counters, cpu));
46 
47 	return total;
48 }
49 
/*
 * Acquire @p for writing.
 *
 * ->mtx serializes writers and diverts slow-path readers.  Setting
 * ->locked sends new readers to the slow path; synchronize_sched()
 * then waits out any reader that sampled ->locked == false inside its
 * rcu_read_lock_sched() section but had not yet bumped its counter.
 * After that, every active reader is visible in the counters, so we
 * simply poll until they all drain.
 */
static inline void percpu_down_write(struct percpu_rw_semaphore *p)
{
	mutex_lock(&p->mtx);
	p->locked = true;
	synchronize_sched(); /* make sure that all readers exit the rcu_read_lock_sched region */
	while (__percpu_count(p->counters))
		msleep(1);	/* readers still active; back off and re-check */
	heavy_mb(); /* C, between read of p->counter and write to data, paired with B */
}
59 
/*
 * Release the write lock.  heavy_mb() (synchronize_sched()) orders
 * the writer's data updates before the ->locked store, pairing with
 * the reader-side light_mb() at A.  Dropping ->mtx then wakes any
 * readers queued on the slow path.
 */
static inline void percpu_up_write(struct percpu_rw_semaphore *p)
{
	heavy_mb(); /* D, between write to data and write to p->locked, paired with A */
	p->locked = false;
	mutex_unlock(&p->mtx);
}
66 
67 static inline int percpu_init_rwsem(struct percpu_rw_semaphore *p)
68 {
69 	p->counters = alloc_percpu(unsigned);
70 	if (unlikely(!p->counters))
71 		return -ENOMEM;
72 	p->locked = false;
73 	mutex_init(&p->mtx);
74 	return 0;
75 }
76 
77 static inline void percpu_free_rwsem(struct percpu_rw_semaphore *p)
78 {
79 	free_percpu(p->counters);
80 	p->counters = NULL; /* catch use after free bugs */
81 }
82 
83 #endif
84