/* linux-6.15: include/linux/vmstat.h (revision cee2cfb7) */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
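
/*
 * Example usage (illustrative; PGFAULT stands in for any vm_event_item):
 *
 *	count_vm_event(PGFAULT);	safe from any context
 *
 *	local_irq_save(flags);
 *	__count_vm_event(PGFAULT);	cpu cannot change underneath us
 *	local_irq_restore(flags);
 *
 * The __ variants use raw_cpu ops and may race with other updaters;
 * that is acceptable here because these counters are statistical only.
 */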

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x)     do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_DEBUG_TLBFLUSH */

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif /* CONFIG_DEBUG_VM_VMACACHE */


#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
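
/*
 * Example (illustrative): __count_zid_vm_events(PGALLOC, ZONE_DMA, 1)
 * expands to
 *
 *	__count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + ZONE_DMA, 1)
 *
 * i.e. the PGALLOC_DMA event. This relies on the per-zone event items
 * being declared in the same order as the zones themselves.
 */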

/*
 * Zone and node-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

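/*
 * On SMP the global and per-zone/node atomics lag behind the per-cpu
 * differentials, so a read can transiently see a negative sum; the
 * readers below clamp such values to zero.
 */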
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization, so the result is still not
 * exactly accurate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
					enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
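
/*
 * Example (illustrative): callers that must not miss recently updated
 * counters, such as watermark checks under memory pressure, pay the
 * O(nr_cpus) cost of the snapshot:
 *
 *	free = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 *
 * while routine statistics reporting can use the cheaper
 * zone_page_state().
 */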

#ifdef CONFIG_NUMA
extern unsigned long sum_zone_node_page_state(int node,
						enum zone_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))
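
/*
 * Example (illustrative): the add/sub wrappers only negate the delta, so
 *
 *	sub_zone_page_state(zone, NR_MLOCK, nr);
 *
 * is equivalent to mod_zone_page_state(zone, NR_MLOCK, -nr).
 */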

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

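/*
 * Example (illustrative): the __ variants assume the caller already
 * serializes updates to this cpu's counters, e.g. by disabling
 * interrupts; the plain variants take care of that themselves:
 *
 *	local_irq_save(flags);
 *	__mod_zone_page_state(zone, NR_MLOCK, nr);
 *	local_irq_restore(flags);
 *
 * mod_zone_page_state(zone, NR_MLOCK, nr) can be used from any context.
 */
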
extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, long delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) do { } while (0)

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif		/* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
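
/*
 * Example (illustrative, mt being the pageblock's migratetype): freeing
 * 1 << order pages back to the buddy allocator,
 *
 *	__mod_zone_freepage_state(zone, 1 << order, mt);
 *
 * keeps NR_FREE_CMA_PAGES in step with NR_FREE_PAGES for CMA pageblocks.
 */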

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */