#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

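/*
 * Minimal usage sketch (illustrative only: "nr_widgets" and the calling
 * context are hypothetical, and error handling is trimmed):
 *
 *	static struct percpu_counter nr_widgets;
 *
 *	if (percpu_counter_init(&nr_widgets, 0))
 *		return -ENOMEM;
 *	percpu_counter_inc(&nr_widgets);
 *	percpu_counter_add(&nr_widgets, 42);
 *	printk(KERN_INFO "~%lld widgets\n",
 *	       (long long)percpu_counter_read(&nr_widgets));
 *	percpu_counter_destroy(&nr_widgets);
 */
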
#ifdef CONFIG_SMP

struct percpu_counter {
	spinlock_t lock;	/* protects ->count and folding of deltas */
	s64 count;		/* folded, approximate global value */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 *counters;		/* per-cpu deltas not yet folded in */
};

/*
 * Per-cpu deltas are folded into ->count once their magnitude reaches
 * this batch size; a larger batch makes adds cheaper and reads fuzzier.
 */
extern int percpu_counter_batch;

int percpu_counter_init(struct percpu_counter *fbc, s64 amount);
int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount);
void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, percpu_counter_batch);
}

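/*
 * What __percpu_counter_add() does, sketched from lib/percpu_counter.c of
 * this era (a paraphrase, not an authoritative copy): the delta accumulates
 * in this cpu's s32 slot and is only folded into fbc->count, under the
 * lock, once it reaches +/- batch:
 *
 *	s32 *pcount = per_cpu_ptr(fbc->counters, get_cpu());
 *	s64 count = *pcount + amount;
 *
 *	if (count >= batch || count <= -batch) {
 *		spin_lock(&fbc->lock);
 *		fbc->count += count;
 *		*pcount = 0;
 *		spin_unlock(&fbc->lock);
 *	} else {
 *		*pcount = count;
 *	}
 *	put_cpu();
 */
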
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

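/*
 * Likewise, __percpu_counter_sum() in lib/percpu_counter.c (again a
 * paraphrase) takes the lock and adds every online cpu's pending delta
 * to fbc->count, so it is accurate but O(nr_cpus) and serializing:
 *
 *	s64 ret;
 *	int cpu;
 *
 *	spin_lock(&fbc->lock);
 *	ret = fbc->count;
 *	for_each_online_cpu(cpu)
 *		ret += *per_cpu_ptr(fbc->counters, cpu);
 *	spin_unlock(&fbc->lock);
 *	return ret;
 */
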
/* Cheap, approximate read: just the folded global value */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter_read() can return a small negative number for a counter
 * that should never logically be negative, since unfolded per-cpu deltas
 * are not included.  Clamp such reads to a positive value.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 1;
}

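/*
 * Worked error bound, derived from the __percpu_counter_add() sketch
 * above (so treat the exact figure as an estimate): each cpu slot holds
 * a delta of magnitude at most batch - 1 before it is folded, so
 * percpu_counter_read() can be off by up to nr_cpus * (batch - 1).
 * For example, with a batch of 32 on a 4-cpu machine a read may deviate
 * from the true value by up to 4 * 31 = 124.
 */
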
#else

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
	return 0;
}

#define percpu_counter_init_irq percpu_counter_init

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

#define __percpu_counter_add(fbc, amount, batch) \
	percpu_counter_add(fbc, amount)

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	/*
	 * The 64-bit read-modify-write is not atomic; disabling preemption
	 * keeps another task on this cpu from interleaving with it.
	 */
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

/* On UP the counter is always exact, so sums are plain reads. */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

#endif	/* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

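/*
 * Illustrative only ("sbi->free_blocks" and "buf" are hypothetical): the
 * classic use named at the top of this file is a superblock free-blocks
 * counter, where the hot allocate/free paths stay cheap and statfs
 * tolerates an approximate, clamped answer:
 *
 *	On allocation:	percpu_counter_dec(&sbi->free_blocks);
 *	On free:	percpu_counter_inc(&sbi->free_blocks);
 *	In statfs:	buf->f_bfree =
 *			  percpu_counter_read_positive(&sbi->free_blocks);
 */
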
#endif /* _LINUX_PERCPU_COUNTER_H */