#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
		NR_VM_EVENT_ITEMS
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
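/*
 * Usage sketch (illustrative only; example_count_fault() is a
 * hypothetical caller, not part of this header). count_vm_event()
 * pins the CPU itself and is safe in preemptible context; the __
 * variants assume the caller already holds off preemption, e.g. with
 * interrupts disabled:
 *
 *	static void example_count_fault(unsigned long nr_scanned)
 *	{
 *		count_vm_event(PGFAULT);
 *
 *		local_irq_disable();
 *		__count_vm_events(SLABS_SCANNED, nr_scanned);
 *		local_irq_enable();
 *	}
 */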
/*
 * Zone-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
				enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function
 * is called frequently on NUMA machines, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}

extern void zone_statistics(struct zonelist *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */
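/*
 * Reader usage sketch (illustrative; the stat items referenced below,
 * such as NR_FREE_PAGES and NR_FILE_PAGES, come from enum
 * zone_stat_item in <linux/mmzone.h>):
 *
 *	unsigned long global_free = global_page_state(NR_FREE_PAGES);
 *	unsigned long zone_free   = zone_page_state(zone, NR_FREE_PAGES);
 *	unsigned long node_file   = node_page_state(nid, NR_FILE_PAGES);
 *
 * On SMP the unsynchronized per-cpu differentials can make a counter
 * transiently negative, which is why the readers above clamp to zero.
 */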
#define __add_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	atomic_long_dec(&page_zone(page)->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

/*
 * We use only atomic operations to update counters, so there is no
 * need to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif

#endif /* _LINUX_VMSTAT_H */
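/*
 * Writer usage sketch (illustrative; the call sites shown are only
 * examples). On SMP the __ variants batch updates in per-cpu
 * differentials and require the caller to prevent preemption (e.g.
 * interrupts disabled); on UP the macros above alias them to the same
 * direct atomic updates:
 *
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 *	inc_zone_page_state(page, NR_FILE_PAGES);
 */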