#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)		count_vm_event(x)
#define count_vm_numa_events(x, y)	count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)		count_vm_event(x)
#define count_vm_tlb_events(x, y)	count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)

/*
 * Zone-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
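
/*
 * Example (illustrative sketch, not a declaration in this header): a
 * caller that only needs a cheap, approximate reading of the global
 * free page count could do:
 *
 *	unsigned long free = global_page_state(NR_FREE_PAGES);
 *
 * NR_FREE_PAGES is a real zone_stat_item; note that pending per-CPU
 * deltas are not folded in, so the result may lag the true value.
 */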
/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all CPUs to find the current
 * deltas. There is no synchronization, so the result cannot be exactly
 * accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_zone_state __inc_zone_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) do { } while (0)

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */
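
/*
 * Example (illustrative sketch): the __-prefixed updaters assume the
 * caller already has interrupts disabled or is otherwise protected
 * from preemption, e.g.:
 *
 *	__inc_zone_page_state(page, NR_FILE_PAGES);
 *
 * whereas inc_zone_page_state()/mod_zone_page_state() are safe to call
 * without such protection. NR_FILE_PAGES is a real zone_stat_item; the
 * call site shown is hypothetical.
 */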

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */
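
/*
 * Example (illustrative sketch): a buddy-allocator style path returning
 * 1 << order pages to the free lists could account for them with:
 *
 *	__mod_zone_freepage_state(zone, 1 << order, migratetype);
 *
 * which updates NR_FREE_PAGES and, for CMA pageblocks, NR_FREE_CMA_PAGES
 * as well. The surrounding call site is hypothetical.
 */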