#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

#ifdef CONFIG_SMP
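/*
 * Layout (SMP): count holds the approximately-current aggregate value,
 * while counters points to a per-CPU array of s32 deltas that have not
 * yet been folded into count.  lock serializes those folds as well as
 * percpu_counter_set() and __percpu_counter_sum().  With
 * CONFIG_HOTPLUG_CPU, list threads every counter onto a global list so
 * that a dying CPU's pending delta can be folded back into count.
 */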
struct percpu_counter {
	spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};

extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key);

#define percpu_counter_init(fbc, value)					\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, &__key);		\
	})
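/*
 * The init macro above gives every call site its own lockdep class key.
 * A minimal lifecycle sketch (nr_widgets and the two functions are
 * made-up names for illustration); note that init can fail, since it
 * allocates the per-CPU array:
 *
 *	static struct percpu_counter nr_widgets;
 *
 *	int widgets_start(void)
 *	{
 *		return percpu_counter_init(&nr_widgets, 0);
 *	}
 *
 *	void widgets_stop(void)
 *	{
 *		pr_info("widgets: ~%lld live\n",
 *			(long long)percpu_counter_sum(&nr_widgets));
 *		percpu_counter_destroy(&nr_widgets);
 *	}
 */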
void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, percpu_counter_batch);
}
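/*
 * Each CPU accumulates its additions in its private s32 slot; only once
 * that slot reaches the batch threshold is it folded into fbc->count
 * under the lock.  percpu_counter_read() can therefore lag the true
 * value by up to roughly batch * num_online_cpus().  The default
 * percpu_counter_batch is sized from the number of online CPUs; a caller
 * that wants a different accuracy/contention trade-off can pass its own
 * batch to __percpu_counter_add(), e.g. (sketch, names made up):
 *
 *	__percpu_counter_add(&nr_widgets, nr_freed, local_batch);
 */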
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}
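/*
 * percpu_counter_sum() above walks every online CPU's slot under the
 * lock: exact, but it costs a lock round-trip and scales with the CPU
 * count.  percpu_counter_read() below is a single load of fbc->count:
 * cheap but approximate.  A common pattern (sketch; names made up) is to
 * use the cheap read far from a limit and only pay for the exact sum
 * when the approximate value gets close:
 *
 *	if (percpu_counter_read(&free_blocks) > limit + slack)
 *		return 1;
 *	return percpu_counter_sum(&free_blocks) > limit;
 */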
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter_read() can return a small negative number for a counter
 * that should never be negative, because per-CPU deltas may not all have
 * been folded into fbc->count yet.  This helper clamps such a transient
 * negative result (note that it returns 1 rather than 0).
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 1;
}

#else

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
	return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

static inline void
__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}
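/*
 * On UP there is nothing to batch or fold: every operation updates the
 * single s64 directly (with preemption disabled around the update) and
 * the batch argument is ignored, so the "sum" helpers below are just
 * plain reads.  Callers therefore use one API for both configurations.
 */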
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

#endif	/* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}
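/*
 * The inc/dec/sub wrappers above make the common case read naturally,
 * e.g. (sketch, names made up):
 *
 *	struct widget *widget_alloc(void)
 *	{
 *		struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);
 *
 *		if (w)
 *			percpu_counter_inc(&nr_widgets);
 *		return w;
 *	}
 *
 *	void widget_free(struct widget *w)
 *	{
 *		percpu_counter_dec(&nr_widgets);
 *		kfree(w);
 *	}
 */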
#endif /* _LINUX_PERCPU_COUNTER_H */