#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
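
/*
 * Example usage (an illustrative sketch, not a definition from this
 * header): a fault path typically counts
 *
 *	count_vm_event(PGFAULT);
 *
 * while a caller that already runs with preemption or interrupts
 * disabled can use the cheaper raw variant, e.g.
 *
 *	__count_vm_events(PGREFILL, nr_scanned);
 */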

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */
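
/*
 * For example (illustrative), a NUMA hinting fault might be counted as
 *
 *	count_vm_numa_event(NUMA_HINT_FAULTS);
 *
 * which compiles away entirely when CONFIG_NUMA_BALANCING is off.
 */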

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x)     do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
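
/*
 * This relies on the per-zone event items being declared in the same
 * order as the zone ids. For example (an illustrative sketch),
 *
 *	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 *
 * resolves to PGALLOC_DMA, PGALLOC_NORMAL, ... depending on the zone
 * the page sits in.
 */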

/*
 * Zone and node-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_state_add(long x, struct zone *zone,
				 enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_stat[item]);
	atomic_long_add(x, &vm_numa_stat[item]);
}

static inline unsigned long global_numa_state(enum numa_stat_item item)
{
	long x = atomic_long_read(&vm_numa_stat[item]);

	return x;
}

static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
					enum numa_stat_item item)
{
	long x = atomic_long_read(&zone->vm_numa_stat[item]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];

	return x;
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	/* Pending per-cpu deltas can drive the sum transiently negative. */
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also folds in the currently pending
 * per-cpu deltas, which requires a loop over all CPUs. There is no
 * synchronization against concurrent updates, so even this result is
 * only approximate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
					enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
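
/*
 * Illustrative comparison (a sketch, not a definition from this header):
 * zone_page_state(zone, NR_FREE_PAGES) is a single atomic read that may
 * lag behind by the pending per-cpu deltas, while
 * zone_page_state_snapshot(zone, NR_FREE_PAGES) folds those deltas in
 * at the cost of an online-CPU walk. Paths that make forward-progress
 * decisions, such as reclaim throttling, tend to pay for the snapshot.
 */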

#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))
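
/*
 * These are thin conveniences around the mod_*() primitives; for
 * instance (illustrative only)
 *
 *	sub_node_page_state(pgdat, NR_FILE_PAGES, nr);
 *
 * is simply mod_node_page_state(pgdat, NR_FILE_PAGES, -(nr)).
 */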

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, long delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * Counters are updated with atomic operations only, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif		/* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
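
/*
 * Illustrative use (a sketch; like the other __ helpers this is
 * typically called with zone->lock held and interrupts disabled):
 * when freeing pages of a given migratetype,
 *
 *	__mod_zone_freepage_state(zone, 1 << order, migratetype);
 *
 * credits NR_FREE_PAGES and, for CMA pageblocks, NR_FREE_CMA_PAGES as
 * well; an allocation passes a negative nr_pages.
 */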

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */