#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif


#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
#ifdef CONFIG_NUMA
		PGSCAN_ZONE_RECLAIM_FAILED,
#endif
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
		KSWAPD_SKIP_CONGESTION_WAIT,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_COMPACTION
		COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
		COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
#endif
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
		NR_VM_EVENT_ITEMS
};

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

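/*
 * Illustrative sketch (not part of the original header): a typical caller
 * simply bumps one of the per-cpu event counters from its hot path.  The
 * helper below is hypothetical; count_vm_event() is the preemption-safe
 * variant, count_vm_events() takes a batched delta.
 */
static inline void example_account_fault(int major)
{
	count_vm_event(PGFAULT);
	if (major)
		count_vm_events(PGMAJFAULT, 1);
}
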
#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)

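/*
 * Illustrative sketch (not part of the original header):
 * __count_zone_vm_events() relies on FOR_ALL_ZONES() laying the per-zone
 * items out in zone-index order, so item##_NORMAL offset by
 * zone_idx(zone) - ZONE_NORMAL hits the item matching the zone.  A
 * page-allocator-style caller charging 2^order freshly allocated pages
 * against the zone it took them from might look like this; the function
 * name is hypothetical.
 */
static inline void example_charge_alloc(struct zone *zone, unsigned int order)
{
	__count_zone_vm_events(PGALLOC, zone, 1L << order);
}
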
/*
 * Zone-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

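/*
 * Illustrative sketch (not part of the original header): reading these
 * counters is a cheap, lockless load, so callers can poll them freely,
 * e.g. to compare a zone's free pages against the global total.  The
 * helper name is hypothetical.
 */
static inline bool example_zone_holds_half_of_free(struct zone *zone)
{
	return zone_page_state(zone, NR_FREE_PAGES) >=
					global_page_state(NR_FREE_PAGES) / 2;
}
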
/*
 * A more accurate version that also considers the currently pending
 * per-cpu deltas. For that we need to loop over all online cpus to pick
 * up their deltas. There is no synchronization, so the result is still
 * only approximate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

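/*
 * Illustrative sketch (not part of the original header): the snapshot
 * variant matters when per-cpu drift could change a decision, e.g. when a
 * zone sits close to a watermark; otherwise the cheap read above is
 * preferred.  The helper and the 'precise' flag are hypothetical.
 */
static inline unsigned long example_free_pages(struct zone *zone, bool precise)
{
	if (precise)
		return zone_page_state_snapshot(zone, NR_FREE_PAGES);
	return zone_page_state(zone, NR_FREE_PAGES);
}
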
extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function
 * is called frequently on NUMA machines, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

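/*
 * Illustrative sketch (not part of the original header): with or without
 * CONFIG_NUMA the call looks the same to the caller, e.g. to total the
 * page-cache pages resident on one node.  The helper name is hypothetical.
 */
static inline unsigned long example_node_file_pages(int node)
{
	return node_page_state(node, NR_FILE_PAGES);
}
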
#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif

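/*
 * Illustrative sketch (not part of the original header): whichever branch
 * above was compiled in, the modifier API looks identical to callers.  A
 * path accounting a page added to or removed from the page cache might do
 * something like this; the function name is hypothetical.
 */
static inline void example_account_pagecache(struct page *page, bool added)
{
	if (added)
		inc_zone_page_state(page, NR_FILE_PAGES);
	else
		dec_zone_page_state(page, NR_FILE_PAGES);
}
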
#endif /* _LINUX_VMSTAT_H */