#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per cpu counter implementation.
 *
 * Counters should only be incremented.  CONFIG_EMBEDDED must be set
 * before VM_EVENT_COUNTERS can be switched off.  Tools like procps
 * (vmstat, top, etc.) read /proc/vmstat and depend on these counters.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

/*
 * Expand xx into one item per configured zone, e.g.
 * FOR_ALL_ZONES(PGALLOC) -> PGALLOC_DMA, PGALLOC_NORMAL on a
 * configuration without DMA32 and HIGHMEM.
 */
#define FOR_ALL_ZONES(xx) xx##_DMA, DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
		NR_VM_EVENT_ITEMS
};

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
#define get_cpu_vm_events(e)		0L
#define count_vm_event(e)		do { } while (0)
#define count_vm_events(e, d)		do { } while (0)
#define __count_vm_event(e)		do { } while (0)
#define __count_vm_events(e, d)		do { } while (0)
#define vm_events_fold_cpu(x)		do { } while (0)

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Relies on FOR_ALL_ZONES() laying out the per zone items consecutively
 * starting at item##_DMA, so that zone_idx() can be used as an offset.
 */
#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_DMA + zone_idx(zone), delta)
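/*
 * Illustrative sketch, not part of the original header: how a call site
 * would typically use the event counters above.  The wrapper function
 * is hypothetical; real callers in mm/ invoke the helpers directly.
 */
static inline void vm_event_usage_example(void)
{
	/* Safe in preemptible context: get_cpu_var() pins the cpu. */
	count_vm_event(PGFAULT);

	/* Caller must already be non-preemptible (lock held, irqs off). */
	__count_vm_events(PGPGIN, 8);
}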
/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	/*
	 * Pending per cpu differentials may transiently drive the sum
	 * negative; report 0 instead of a huge unsigned value.
	 */
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item.  This function
 * is called frequently on a NUMA machine, so it must stay as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_DMA], item);
}

extern void zone_statistics(struct zonelist *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */

#define __add_zone_page_state(__z, __i, __d) \
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d) \
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);
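/*
 * Illustrative sketch, not part of the original header: reading the
 * counters with the helpers above.  The function name is hypothetical;
 * NR_FILE_MAPPED stands in for any enum zone_stat_item from
 * <linux/mmzone.h>.
 */
static inline unsigned long zone_stat_usage_example(struct zone *zone)
{
	/* Value for this zone only. */
	unsigned long in_zone = zone_page_state(zone, NR_FILE_MAPPED);
	/* Machine wide total, summed over all zones. */
	unsigned long total = global_page_state(NR_FILE_MAPPED);

	return total - in_zone;	/* mapped pages outside this zone */
}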
#ifdef CONFIG_SMP

void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_vm_stats(void);

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions below modify the zone and global counters directly.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	atomic_long_dec(&page_zone(page)->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

/*
 * Only atomic operations are used to update the counters, so there is
 * no need to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_vm_stats(void) { }

#endif /* CONFIG_SMP */

#endif /* _LINUX_VMSTAT_H */
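/*
 * Illustrative note, not part of the original header: under CONFIG_SMP
 * the mod/inc/dec calls above resolve to the out-of-line functions in
 * mm/vmstat.c that batch updates in per cpu differentials; on UP they
 * collapse to the inline atomic operations.  A hypothetical call site:
 *
 *	__inc_zone_page_state(page, NR_FILE_MAPPED);
 *	...
 *	__dec_zone_page_state(page, NR_FILE_MAPPED);
 */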