/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <[email protected]>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by the kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with a gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg
 * doesn't allocate memory but reclaims memory from all available zones,
 * so the "where do I want memory from" bits of gfp_mask have no meaning.
 * Any bits of that field would therefore work, but having a rule is
 * better: a charge function's gfp_mask should be set to GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK) to avoid ambiguous code.
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL
 * is sane.)
 */

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
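
/*
 * Example (an illustrative sketch, not part of this header;
 * charge_new_anon_page() is a hypothetical caller): charging a freshly
 * faulted anonymous page while honouring the gfp_mask rule above by
 * keeping only the reclaim-related bits of the caller's mask.
 *
 *	int charge_new_anon_page(struct page *page, struct mm_struct *mm,
 *				 gfp_t gfp_mask)
 *	{
 *		return mem_cgroup_newpage_charge(page, mm,
 *					gfp_mask & GFP_RECLAIM_MASK);
 *	}
 */
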
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
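
/*
 * Example of the try/commit/cancel protocol (an illustrative sketch; the
 * surrounding fault handling is elided and map_swapped_in_page() is a
 * hypothetical stand-in for the work done between try and commit):
 *
 *	struct mem_cgroup *ptr;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		return -ENOMEM;
 *	if (map_swapped_in_page(page)) {
 *		mem_cgroup_cancel_charge_swapin(ptr);
 *		return -EFAULT;
 *	}
 *	mem_cgroup_commit_charge_swapin(page, ptr);
 */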

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
				  enum lru_list from, enum lru_list to);

/* For coalescing uncharges, to reduce memcg overhead. */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
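
/*
 * Example (an illustrative sketch; pages_to_free is a stand-in list of
 * pages): when uncharging many pages in a row, bracketing the loop with
 * start/end lets the per-page uncharges be coalesced instead of hitting
 * the charge counters once per page.
 *
 *	mem_cgroup_uncharge_start();
 *	list_for_each_entry(page, &pages_to_free, lru)
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 */
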
extern int mem_cgroup_shmem_charge_fallback(struct page *page,
			struct mm_struct *mm, gfp_t gfp_mask);

extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

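/*
 * Returns non-zero iff @mm's owner task belongs to the memory cgroup
 * @cgroup. mm->owner is dereferenced under rcu_read_lock() because the
 * owning task may change, e.g. when it exits.
 */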
static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *mem;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	rcu_read_unlock();
	return cgroup == mem;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **ptr);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage, bool migration_ok);
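
/*
 * Example of the migration protocol (an illustrative sketch;
 * copy_page_contents() is a hypothetical stand-in for the real migration
 * work). end_migration() must always run after a successful prepare, with
 * migration_ok saying whether the migration itself succeeded:
 *
 *	struct mem_cgroup *mem;
 *	int ret;
 *
 *	ret = mem_cgroup_prepare_migration(page, newpage, &mem);
 *	if (ret)
 *		return ret;
 *	ret = copy_page_contents(page, newpage);
 *	mem_cgroup_end_migration(mem, page, newpage, ret == 0);
 */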

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
				       struct zone *zone,
				       enum lru_list lru);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	return mem_cgroup_subsys.disabled;
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
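
/*
 * Example (an illustrative sketch): rmap code such as page_add_file_rmap()
 * keeps MEMCG_NR_FILE_MAPPED in sync as a file page gains and later loses
 * a mapping.
 *
 *	mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	...
 *	mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
 */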

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask);
u64 mem_cgroup_get_limit(struct mem_cgroup *mem);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
#endif

#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline int mem_cgroup_shmem_charge_fallback(struct page *page,
			struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_add_lru_list(struct page *page,
					   enum lru_list lru)
{
}

static inline void mem_cgroup_del_lru_list(struct page *page,
					   enum lru_list lru)
{
}

static inline void mem_cgroup_rotate_lru_list(struct page *page,
					      enum lru_list lru)
{
}

static inline void mem_cgroup_del_lru(struct page *page)
{
}

static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}

static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
	struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone,
			 enum lru_list lru)
{
	return 0;
}

static inline struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head,
						struct page *tail)
{
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#endif /* _LINUX_MEMCONTROL_H */