#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */
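
/*
 * Illustrative usage (a sketch, not mandated by this header): callers
 * typically bump an event counter like
 *
 *	count_vm_event(PGFREE);			from preemptible context
 *	__count_vm_event(PGFREE);		preemption already off
 *	__count_vm_events(PGFREE, 1 << order);	batched; "order" is whatever
 *						the caller has at hand
 *
 * PGFREE is just an example item; any vm_event_item works the same way.
 */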

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) xx##_DMA, DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
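
/*
 * For example, with both CONFIG_ZONE_DMA32 and CONFIG_HIGHMEM enabled,
 * FOR_ALL_ZONES(PGALLOC) expands to
 *
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH
 *
 * With either option disabled, the corresponding item vanishes, which is
 * why DMA32_ZONE() supplies the trailing comma and HIGHMEM_ZONE() the
 * leading one.
 */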

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
		NR_VM_EVENT_ITEMS
};

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}
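
/*
 * count_vm_event() and count_vm_events() pin the caller to a cpu:
 * get_cpu_var() disables preemption and put_cpu() re-enables it. The
 * __ variants are cheaper but require that the caller has already
 * disabled preemption (or runs with interrupts off).
 */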

extern void all_vm_events(unsigned long *);
extern void vm_events_fold_cpu(int cpu);
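
/*
 * Illustrative only: a reader (such as the /proc/vmstat code) folds the
 * per-cpu counters into one array and indexes it by event item:
 *
 *	unsigned long ev[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(ev);
 *	printk("pgfault %lu\n", ev[PGFAULT]);
 */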

#else

/* Disable counters */
#define get_cpu_vm_events(e)	0L
#define count_vm_event(e)	do { } while (0)
#define count_vm_events(e, d)	do { } while (0)
#define __count_vm_event(e)	do { } while (0)
#define __count_vm_events(e, d)	do { } while (0)
#define vm_events_fold_cpu(x)	do { } while (0)

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
			__count_vm_events(item##_DMA + zone_idx(zone), delta)
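
/*
 * Illustrative expansion (a sketch): a caller accounting a hypothetical
 * order-"order" page allocation in "zone" might write
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 *
 * which becomes __count_vm_events(PGALLOC_DMA + zone_idx(zone), 1 << order).
 * This relies on FOR_ALL_ZONES() laying out the PGALLOC_* items in the
 * same order as the zone indices.
 */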

/*
 * Zone-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

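/*
 * Under SMP, per-cpu differentials that have not yet been folded back
 * into these atomics can make the sums transiently negative, so the
 * readers below clamp negative values to zero rather than return a
 * huge bogus unsigned count.
 */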
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function
 * is called frequently on NUMA machines, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_DMA], item);
}
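
/*
 * Illustrative only: the number of mapped file pages on a hypothetical
 * node "nid" could be read with
 *
 *	unsigned long mapped = node_page_state(nid, NR_FILE_MAPPED);
 *
 * NR_FILE_MAPPED is just an example zone_stat_item.
 */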

extern void zone_statistics(struct zonelist *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */

#define __add_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_vm_stats(void);

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	atomic_long_dec(&page_zone(page)->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_vm_stats(void) { }
#endif

#endif /* _LINUX_VMSTAT_H */