/* xref: /linux-6.15/include/linux/vmstat.h (revision e756bc56) */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
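
/*
 * The double-underscore variants above use the raw __this_cpu
 * operations and are only safe when the caller already prevents
 * preemption/CPU migration (e.g. with interrupts disabled); the plain
 * versions are safe from any context. A sketch of hypothetical call
 * sites (illustrative only, not part of this header):
 *
 *	count_vm_event(PGFAULT);		from any context
 *	__count_vm_events(PGFREE, 1 << order);	caller already protected
 */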

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
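
/*
 * The macro above relies on the per-zone event items being laid out in
 * zone order in enum vm_event_item (PGALLOC_DMA, PGALLOC_DMA32,
 * PGALLOC_NORMAL, ...), so that "item##_NORMAL - ZONE_NORMAL +
 * zone_idx(zone)" selects the item matching the zone. An illustrative
 * use (not part of this header):
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 *
 * counts 1 << order PGALLOC events against whichever zone was passed.
 */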

/*
 * Zone-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
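
/*
 * Why the "if (x < 0) x = 0" clamp on SMP: per-CPU deltas are only
 * folded into the atomic counters periodically, so a reader can see a
 * transiently negative sum. Illustrative only (numbers invented): the
 * atomic counter may read -3 while one CPU still holds a pending +5
 * in its vm_stat_diff; the true value is 2, and clamping to 0 is far
 * closer than letting -3 wrap to a huge unsigned value.
 */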

/*
 * More accurate version that also considers the currently pending
 * per-CPU deltas. To find those we must loop over all online CPUs.
 * There is no synchronization, so the result is still not exact.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
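
/*
 * zone_page_state_snapshot() is more expensive than zone_page_state()
 * because it walks every online CPU, so it is meant for slow paths
 * where acting on a stale value would be harmful. A hypothetical
 * slow-path check (illustrative only) might prefer:
 *
 *	unsigned long free = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 */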

extern unsigned long global_reclaimable_pages(void);

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function
 * is called frequently on NUMA machines, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
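
/*
 * The sum is simply taken over every zone the node may contain.
 * Illustrative use (not part of this header): the file-cache pages
 * resident on the local node could be read as
 *
 *	node_page_state(numa_node_id(), NR_FILE_PAGES);
 */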

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
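
/*
 * add_zone_page_state() and sub_zone_page_state() are thin wrappers:
 * for example (illustrative only), sub_zone_page_state(zone,
 * NR_BOUNCE, 1) expands to mod_zone_page_state(zone, NR_BOUNCE, -(1)).
 * Note the parentheses around __d so that expression arguments are
 * negated as a whole.
 */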

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
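
/*
 * A sketch of how the threshold hooks fit together (inferred from the
 * naming here, not spelled out in this header): memory reclaim can
 * temporarily tighten a node's per-CPU stat thresholds so that
 * counter drift does not hide low-watermark conditions, e.g.
 *
 *	set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
 *	...reclaim...
 *	set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 */
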
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no
 * need to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif		/* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
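
/*
 * CMA pages are free pages too, so freeing or allocating on a CMA
 * pageblock must adjust both counters. An illustrative call from a
 * page-freeing path (not part of this header):
 *
 *	__mod_zone_freepage_state(zone, 1 << order,
 *				  get_pageblock_migratetype(page));
 */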

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */