#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

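/*
 * FOR_ALL_ZONES(xx) emits one identifier per configured zone, in zone
 * order. With every zone enabled, FOR_ALL_ZONES(PGALLOC) expands to:
 * PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH, PGALLOC_MOVABLE
 */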
#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
#ifdef CONFIG_NUMA
		PGSCAN_ZONE_RECLAIM_FAILED,
#endif
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
		KSWAPD_SKIP_CONGESTION_WAIT,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
		NR_VM_EVENT_ITEMS
};

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
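
/*
 * Illustrative usage (not part of this header): the __ variants rely on
 * the caller having preemption already disabled, e.g. under a spinlock
 * or in interrupt context; the plain variants are safe in any context.
 *
 *	count_vm_event(PGMAJFAULT);
 *	__count_vm_events(PGFREE, 1 << order);
 */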

extern void all_vm_events(unsigned long *);
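/*
 * On CPU hot-unplug the dead CPU's event counts are folded into those
 * of the CPU running the hotplug callback, so no events are lost.
 */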
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
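
/*
 * Illustrative example: because the per-zone event counters were laid
 * out in zone order by FOR_ALL_ZONES(), item##_NORMAL - ZONE_NORMAL +
 * zone_idx(zone) selects the counter matching @zone.  For instance,
 * __count_zone_vm_events(PGALLOC, zone, 1 << order) bumps PGALLOC_DMA
 * when @zone is the DMA zone.
 */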

/*
 * Zone-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

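/*
 * The pending per-cpu differentials are not folded in here, so a
 * reader can race with updates and see a transiently negative sum.
 * Clamp to zero rather than return a huge bogus unsigned value.
 */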
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function
 * is called frequently on a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
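
/*
 * Illustrative example: node_page_state(numa_node_id(), NR_FILE_PAGES)
 * sums the page cache pages of every zone on the local node.
 */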

extern void zone_statistics(struct zone *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
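/*
 * The __ versions below assume the caller has already disabled
 * preemption (or interrupts) and are therefore cheaper; the plain
 * versions handle that themselves and may be called from any context.
 */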
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We use only atomic operations to update counters, so there is no
 * need to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif /* CONFIG_SMP */

#endif /* _LINUX_VMSTAT_H */