#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/config.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented, and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

#define FOR_ALL_ZONES(x) x##_DMA, x##_DMA32, x##_NORMAL, x##_HIGH
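
/*
 * For illustration, FOR_ALL_ZONES(PGALLOC) in the enum below expands to
 *
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH
 *
 * giving each zone its own instance of the event counter.
 */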

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
		NR_VM_EVENT_ITEMS
};

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states.event[item])++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states.event[item])++;
	put_cpu();
}
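
/*
 * Example (illustrative, not part of this header): counting a single
 * event. From a preemptible context the plain form pins the cpu itself:
 *
 *	count_vm_event(PGFAULT);
 *
 * The __ form is intended for contexts where preemption is already
 * disabled:
 *
 *	__count_vm_event(PGROTATED);
 */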

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states.event[item]) += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states.event[item]) += delta;
	put_cpu();
}
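
/*
 * Example: folding a batch into one update instead of looping, with a
 * hypothetical nr_freed count of pages released by the caller:
 *
 *	__count_vm_events(PGFREE, nr_freed);
 */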

extern void all_vm_events(unsigned long *);
extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
#define get_cpu_vm_events(e)	0L
#define count_vm_event(e)	do { } while (0)
#define count_vm_events(e, d)	do { } while (0)
#define __count_vm_event(e)	do { } while (0)
#define __count_vm_events(e, d)	do { } while (0)
#define vm_events_fold_cpu(x)	do { } while (0)

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
			__count_vm_events(item##_DMA + zone_idx(zone), delta)
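
/*
 * The macro relies on the FOR_ALL_ZONES() members being consecutive,
 * starting at item##_DMA, so adding zone_idx(zone) selects the counter
 * for the given zone. Illustrative use, with a hypothetical allocation
 * order:
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 */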

/*
 * Zone based page accounting with per cpu differentials.
 *
 * The per cpu deltas are folded into these atomic counters only
 * periodically, so the values read back are approximate and, on SMP,
 * may even be transiently negative; the readers below clamp them to
 * zero.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
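
/*
 * Example (illustrative): reading the approximate counts, using items
 * from enum zone_stat_item in <linux/mmzone.h>:
 *
 *	unsigned long dirty  = global_page_state(NR_FILE_DIRTY);
 *	unsigned long mapped = zone_page_state(zone, NR_FILE_MAPPED);
 */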

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function
 * is called frequently on NUMA machines, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifndef CONFIG_DMA_IS_NORMAL
#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_DMA], item);
}
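
/*
 * Example (illustrative, not part of this header): comparing a node's
 * page count against a hypothetical limit:
 *
 *	if (node_page_state(numa_node_id(), NR_FILE_PAGES) > limit)
 *		reclaim_from_node();
 *
 * where NR_FILE_PAGES comes from enum zone_stat_item in <linux/mmzone.h>
 * and reclaim_from_node() is made up for the example.
 */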

extern void zone_statistics(struct zonelist *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */

#define __add_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
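
/*
 * The sub variants simply negate the delta, so with a hypothetical
 * page count nr:
 *
 *	sub_zone_page_state(zone, NR_FILE_PAGES, nr);
 *
 * is equivalent to mod_zone_page_state(zone, NR_FILE_PAGES, -nr).
 */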

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_vm_stats(void);

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration.
 * The functions below directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	atomic_long_inc(&page_zone(page)->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	atomic_long_dec(&page_zone(page)->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_vm_stats(void) { }
#endif /* CONFIG_SMP */

#endif /* _LINUX_VMSTAT_H */