#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

#ifdef CONFIG_SMP

struct percpu_counter {
	spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};

extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key);

#define percpu_counter_init(fbc, value)					\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, &__key);		\
	})

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs);

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, percpu_counter_batch);
}
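
/*
 * Note on accuracy (editorial sketch of the batching behaviour): updates
 * accumulate in each CPU's local s32 counter and are only folded into
 * fbc->count, under fbc->lock, once the local delta reaches the batch
 * threshold.  percpu_counter_read() therefore returns an approximation
 * that can be off by up to about (number of online CPUs * batch); use
 * percpu_counter_sum() when an exact value is required.
 */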
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 1;
}

#else

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
	return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

static inline void
__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

#endif	/* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */
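
/*
 * Example usage (an illustrative editorial sketch, not part of this header;
 * "nr_widgets" and the helper functions below are hypothetical).  Every call
 * shown is declared above and works in both the SMP and UP configurations:
 *
 *	static struct percpu_counter nr_widgets;
 *
 *	int widgets_init(void)
 *	{
 *		return percpu_counter_init(&nr_widgets, 0);
 *	}
 *
 *	void widget_created(void)
 *	{
 *		percpu_counter_inc(&nr_widgets);
 *	}
 *
 *	s64 widgets_exact_count(void)
 *	{
 *		return percpu_counter_sum_positive(&nr_widgets);
 *	}
 *
 *	void widgets_exit(void)
 *	{
 *		percpu_counter_destroy(&nr_widgets);
 *	}
 */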