/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <[email protected]>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by the kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);

struct memcg_scanrecord {
	struct mem_cgroup *mem; /* scanned memory cgroup */
	struct mem_cgroup *root; /* scan target hierarchy root */
	int context;		/* scanning context (see memcontrol.c) */
	unsigned long nr_scanned[2]; /* the number of scanned pages */
	unsigned long nr_rotated[2]; /* the number of rotated pages */
	unsigned long nr_freed[2]; /* the number of freed pages */
	unsigned long elapsed; /* nsec of time elapsed while scanning */
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions that take a gfp_mask should be passed GFP_KERNEL
 * or (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg
 * does not allocate memory itself but reclaims memory from all available
 * zones, so the "where do I want memory from" placement bits of gfp_mask
 * are meaningless. Any bits could therefore be passed, but having a rule
 * avoids ambiguity: callers should pass either GFP_KERNEL or
 * gfp_mask & GFP_RECLAIM_MASK to the charge functions.
 * (Of course, should memcg ever allocate memory itself, GFP_KERNEL remains
 * a sane choice.)
 */
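
/*
 * Usage sketch (illustrative only): charging a new page-cache page with
 * the placement bits masked off, per the rule above.  This mirrors the
 * pattern page-cache callers are expected to follow; locking and the
 * actual page-cache insertion are elided.
 */
#if 0	/* example only, not compiled */
static int example_cache_charge(struct page *page, gfp_t gfp_mask)
{
	/* only the reclaim-behaviour bits of gfp_mask are meaningful */
	return mem_cgroup_cache_charge(page, current->mm,
				       gfp_mask & GFP_RECLAIM_MASK);
}
#endif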

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
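
/*
 * Usage sketch (illustrative only): the try/commit/cancel protocol for
 * swap-in charging.  example_map_page() stands in for the real page
 * table update and is hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_swapin_charge(struct mm_struct *mm, struct page *page)
{
	struct mem_cgroup *ptr;

	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
		return -ENOMEM;		/* nothing charged, nothing to undo */

	if (example_map_page(mm, page)) {
		/* mapping failed: drop the pre-charge */
		mem_cgroup_cancel_charge_swapin(ptr);
		return -EFAULT;
	}

	/* mapping succeeded: turn the pre-charge into a real charge */
	mem_cgroup_commit_charge_swapin(page, ptr);
	return 0;
}
#endif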

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
				  enum lru_list from, enum lru_list to);

/* For coalescing uncharges to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);
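
/*
 * Usage sketch (illustrative only): bracketing a run of uncharges with
 * start/end so memcg can coalesce the accounting updates.  The pages[]
 * array and nr are assumed to be supplied by the caller.
 */
#if 0	/* example only, not compiled */
static void example_uncharge_batch(struct page **pages, int nr)
{
	int i;

	mem_cgroup_uncharge_start();
	for (i = 0; i < nr; i++)
		mem_cgroup_uncharge_page(pages[i]);
	mem_cgroup_uncharge_end();
}
#endif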

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
extern int mem_cgroup_shmem_charge_fallback(struct page *page,
			struct mm_struct *mm, gfp_t gfp_mask);

extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *mem;
	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	rcu_read_unlock();
	return cgroup == mem;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage, bool migration_ok);
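
/*
 * Usage sketch (illustrative only): bracketing page migration with the
 * prepare/end pair.  example_move_mapping() is a hypothetical stand-in
 * for the real copy-and-remap step; end_migration() settles the charge
 * on whichever page survives.
 */
#if 0	/* example only, not compiled */
static int example_migrate_page(struct page *page, struct page *newpage)
{
	struct mem_cgroup *mem = NULL;
	bool migration_ok;
	int ret;

	ret = mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL);
	if (ret)
		return ret;

	migration_ok = example_move_mapping(page, newpage);

	mem_cgroup_end_migration(mem, page, newpage, migration_ok);
	return migration_ok ? 0 : -EAGAIN;
}
#endif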

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					int nid, int zid, unsigned int lrumask);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);

extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
						  gfp_t gfp_mask, bool noswap,
						  struct memcg_scanrecord *rec);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						struct zone *zone,
						struct memcg_scanrecord *rec,
						unsigned long *nr_scanned);
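
/*
 * Usage sketch (illustrative only): driving targeted reclaim against a
 * single memcg while collecting scan statistics.  The context value and
 * the meaning of the [2] statistic arrays are defined in memcontrol.c;
 * the zeroed fields here are placeholders.
 */
#if 0	/* example only, not compiled */
static unsigned long example_memcg_reclaim(struct mem_cgroup *mem)
{
	struct memcg_scanrecord rec = {
		.mem  = mem,
		.root = mem,	/* treat this memcg as the hierarchy root */
	};

	/* reclaim with swap allowed, using a plain GFP_KERNEL mask */
	return try_to_free_mem_cgroup_pages(mem, GFP_KERNEL, false, &rec);
}
#endif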

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}
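
/*
 * Usage sketch (illustrative only): the usual early-exit guard, so a
 * per-page hook costs almost nothing when the controller was disabled
 * on the kernel command line.
 */
#if 0	/* example only, not compiled */
static void example_memcg_hook(struct page *page)
{
	if (mem_cgroup_disabled())
		return;
	/* ... per-page memcg accounting work ... */
}
#endif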

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
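
/*
 * Usage sketch (illustrative only): keeping MEMCG_NR_FILE_MAPPED in
 * step with a page's mapped state, in the style of the rmap hooks; the
 * mapcount manipulation itself is elided.
 */
#if 0	/* example only, not compiled */
static void example_account_file_mapped(struct page *page, bool mapped)
{
	if (mapped)
		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
	else
		mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
}
#endif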

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *mem);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					  struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline int mem_cgroup_shmem_charge_fallback(struct page *page,
			struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
}

static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru(struct page *page)
{
}

static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}

static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
	struct mem_cgroup **ptr, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
				unsigned int lru_mask)
{
	return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head,
						struct page *tail)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

#endif /* _LINUX_MEMCONTROL_H */