/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>
#include <linux/mmdebug.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT   1
#define DISABLE_NUMA_STAT   0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos);
#endif

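/*
 * Cumulative counters for a single reclaim pass, filled in during page
 * reclaim (e.g. by shrink_page_list()) and consumed by vmscan heuristics
 * and tracepoints. nr_activate[] is indexed by LRU type: [0] counts
 * anon pages, [1] counts file pages.
 */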
struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_pageout;
	unsigned nr_activate[2];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
	unsigned nr_lazyfree_fail;
};

enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

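/*
 * Illustrative usage (not an API contract): count_vm_event() uses
 * this_cpu_inc() and is safe from any context; the __ variants use
 * raw_cpu ops and may occasionally lose an update if the caller can
 * be preempted or interrupted, which these counters tolerate:
 *
 *	count_vm_event(PGFAULT);		// any context
 *	__count_vm_events(PGSTEAL_DIRECT, nr);	// nr: caller-supplied count
 */
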
extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x)     do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
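
/*
 * Illustrative expansion: __count_zid_vm_events(PGALLOC, ZONE_DMA, 1)
 * becomes __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + ZONE_DMA, 1),
 * i.e. it bumps PGALLOC_DMA. This relies on the per-zone event items
 * being declared in the same order as the zone enum.
 */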

/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_state_add(long x, struct zone *zone,
				 enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_stat[item]);
	atomic_long_add(x, &vm_numa_stat[item]);
}

static inline unsigned long global_numa_state(enum numa_stat_item item)
{
	long x = atomic_long_read(&vm_numa_stat[item]);

	return x;
}

static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
					enum numa_stat_item item)
{
	long x = atomic_long_read(&zone->vm_numa_stat[item]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];

	return x;
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline
unsigned long global_node_page_state_pages(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

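/*
 * Note on the check below: some node stat items are accounted in bytes
 * rather than pages (see vmstat_item_in_bytes()). Those must be read
 * via global_node_page_state_pages(), which reports them in pages; this
 * wrapper warns if it is misused for a byte-accounted item.
 */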
static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return global_node_page_state_pages(item);
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
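
/*
 * Rule of thumb (illustrative, not enforced): zone_page_state() is the
 * cheap read and is fine for reporting; zone_page_state_snapshot()
 * folds in the pending per-CPU deltas and is preferred where a stale
 * value could cause a wrong decision, e.g. near the watermarks:
 *
 *	free = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 *	if (free <= min_wmark_pages(zone))
 *		...
 */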

#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
					   enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
#endif /* CONFIG_NUMA */

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
		loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	/* Byte-accounted items are stored as pages at the global level. */
	if (vmstat_item_in_bytes(item)) {
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif		/* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
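
/*
 * For instance (illustrative, assuming CONFIG_CMA): freeing 1 << order
 * pages from a CMA pageblock would be accounted as
 *
 *	__mod_zone_freepage_state(zone, 1 << order, MIGRATE_CMA);
 *
 * raising NR_FREE_PAGES and NR_FREE_CMA_PAGES together so that the CMA
 * free count stays consistent with the overall free page count.
 */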

extern const char * const vmstat_text[];

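/*
 * vmstat_text concatenates the stat name arrays in a fixed order, which
 * the helpers below rely on for their offset arithmetic:
 *
 *	zone items	[0, NR_VM_ZONE_STAT_ITEMS)
 *	NUMA items	[+, NR_VM_NUMA_STAT_ITEMS)
 *	node items	[+, NR_VM_NODE_STAT_ITEMS)
 *	writeback items	[+, NR_VM_WRITEBACK_STAT_ITEMS)
 *	vm events	[+, NR_VM_EVENT_ITEMS)
 */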
static inline const char *zone_stat_name(enum zone_stat_item item)
{
	return vmstat_text[item];
}

#ifdef CONFIG_NUMA
static inline const char *numa_stat_name(enum numa_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_NUMA */

static inline const char *node_stat_name(enum node_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_STAT_ITEMS +
			   item];
}

static inline const char *lru_list_name(enum lru_list lru)
{
	return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
}

static inline const char *writeback_stat_name(enum writeback_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_STAT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   item];
}

#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
static inline const char *vm_event_name(enum vm_event_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_STAT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   NR_VM_WRITEBACK_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */

#endif /* _LINUX_VMSTAT_H */
449