#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/gfp.h>

#ifdef CONFIG_SMP

struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};

extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key);

#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
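/*
 * Illustrative sketch only (example_over_limit() is hypothetical, not part
 * of this header): percpu_counter_read() is a single load of fbc->count and
 * may lag the true value by up to batch * num_online_cpus(), because each
 * CPU folds its local slot into fbc->count only in batches.  When a
 * decision falls inside that error margin, fall back to the exact (and
 * expensive) percpu_counter_sum().  This mirrors what
 * __percpu_counter_compare() does internally; real callers should simply
 * use percpu_counter_compare().
 */
#if 0	/* example, never compiled */
static inline bool example_over_limit(struct percpu_counter *fbc, s64 limit)
{
	s64 error = (s64)percpu_counter_batch * num_online_cpus();
	s64 count = percpu_counter_read(fbc);

	if (count - limit > error)	/* over even in the worst case */
		return true;
	if (limit - count > error)	/* under even in the worst case */
		return false;
	return percpu_counter_sum(fbc) >= limit; /* too close: sum exactly */
}
#endif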
/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative: per-CPU deltas are
 * only folded into fbc->count in batches, so the central count can
 * transiently lag behind the true sum.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 0;
}

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	fbc->count = amount;
	return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers.  In the UP case
 * there are no per-CPU slots to lag behind, so the count should never be
 * negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return 1;
}

#endif	/* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */
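/*
 * Example (illustrative sketch with hypothetical names, shown here as a
 * comment): the typical lifecycle of a percpu_counter.  On SMP builds
 * percpu_counter_init() allocates the per-CPU slots, so it can fail (and
 * may sleep for GFP_KERNEL), and every successfully initialized counter
 * must be freed with percpu_counter_destroy().
 *
 *	struct percpu_counter nr_items;
 *
 *	if (percpu_counter_init(&nr_items, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	percpu_counter_inc(&nr_items);		// per-CPU fast path
 *	percpu_counter_add(&nr_items, 42);
 *	percpu_counter_sub(&nr_items, 2);
 *
 *	pr_info("approx %lld, exact %lld\n",
 *		percpu_counter_read(&nr_items),
 *		percpu_counter_sum(&nr_items));
 *
 *	percpu_counter_destroy(&nr_items);
 */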