/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <[email protected]>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/*
 * The corresponding mem_cgroup_stat_names array is defined in mm/memcontrol.c;
 * the two lists must be kept in sync with each other.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_WRITEBACK,	/* # of pages under writeback */
	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);
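
/*
 * Usage sketch (illustrative only, not a declaration in this header): a
 * page charge normally pairs mem_cgroup_try_charge() with either
 * mem_cgroup_commit_charge() once the page has been instantiated or
 * mem_cgroup_cancel_charge() on failure, and mem_cgroup_uncharge() runs
 * when the page is finally freed:
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
 *		goto oom;
 *	... instantiate the page (page tables, LRU) ...
 *	mem_cgroup_commit_charge(page, memcg, false);
 *
 * with mem_cgroup_cancel_charge(page, memcg) on the error path instead of
 * the commit.
 */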

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
			bool lrucare);

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				  struct mem_cgroup *memcg);
bool task_in_mem_cgroup(struct task_struct *task,
			const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);

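/*
 * Check whether the owner task of @mm is charged to @memcg or to one of its
 * descendants: resolve the owning task's memcg under RCU and defer to
 * __mem_cgroup_same_or_subtree().
 */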
static inline
bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	match = __mem_cgroup_same_or_subtree(memcg, task_memcg);
	rcu_read_unlock();
	return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
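
/*
 * Usage sketch (illustrative only): mem_cgroup_iter() walks the hierarchy
 * below @root; the walk ends when it returns NULL, and a walk abandoned
 * early must call mem_cgroup_iter_break() to drop the reference it holds:
 *
 *	struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, NULL);
 *	do {
 *		... reclaim from or account against memcg ...
 *		if (done) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 */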

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);

static inline void mem_cgroup_oom_enable(void)
{
	WARN_ON(current->memcg_oom.may_oom);
	current->memcg_oom.may_oom = 1;
}

static inline void mem_cgroup_oom_disable(void)
{
	WARN_ON(!current->memcg_oom.may_oom);
	current->memcg_oom.may_oom = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_oom.memcg;
}

bool mem_cgroup_oom_synchronize(bool wait);
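
/*
 * Usage sketch (illustrative only, modelled on the page fault path): memcg
 * OOM handling is armed around the fault and only synchronized with once
 * the fault has unwound its locks:
 *
 *	mem_cgroup_oom_enable();
 *	ret = ... handle the fault ...;
 *	mem_cgroup_oom_disable();
 *	if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
 *		mem_cgroup_oom_synchronize(false);
 */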

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (memory_cgrp_subsys.disabled)
		return true;
	return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
					 unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	rcu_read_lock();
	*locked = false;
	if (atomic_read(&memcg_moving))
		__mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
				unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	if (*locked)
		__mem_cgroup_end_update_page_stat(page, flags);
	rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_stat_index idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
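
/*
 * Usage sketch (illustrative only): page state that is mirrored in memcg
 * statistics is changed inside a begin/end_update_page_stat() section so
 * the update cannot race with the page moving to another memcg:
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	... change the page state, e.g. map it into a file ...
 *	mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */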

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);

void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
					     enum vm_event_item idx)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_count_vm_event(mm, idx);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_MEMCG */
struct mem_cgroup;

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
					struct mem_cgroup **memcgp)
{
	*memcgp = NULL;
	return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool lrucare)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
					    struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *oldpage,
				      struct page *newpage,
				      bool lrucare)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct zone *zone)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	return 1;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
			      int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_oom_enable(void)
{
}

static inline void mem_cgroup_oom_disable(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */

#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;

extern int memcg_limited_groups_array_size;

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check whether each cache is valid (it is either valid or NULL).
 * The slab_mutex must be held while looping through these caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)
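
/*
 * Usage sketch (illustrative only; cache_from_memcg_idx() is defined in
 * mm/slab.h, not in this header): walk a root cache's per-memcg children,
 * skipping indices that have no cache yet:
 *
 *	int i;
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache_index(i) {
 *		c = cache_from_memcg_idx(s, i);
 *		if (!c)
 *			continue;
 *		... operate on the child cache c ...
 *	}
 *	mutex_unlock(&slab_mutex);
 */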

static inline bool memcg_kmem_enabled(void)
{
	return static_key_false(&memcg_kmem_enabled_key);
}

/*
 * In general, we do everything in our power to avoid incurring any overhead
 * for non-memcg users of the kmem functions: not even a function call, if we
 * can avoid it.
 *
 * Therefore, we inline all those functions so that in the best case we see
 * that kmemcg is off for everybody and proceed quickly.  If it is on, we
 * still do most of the flag checking inline. We check a lot of conditions,
 * but because they are pretty simple, they are expected to be fast.
 */
bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
					int order);
void __memcg_kmem_commit_charge(struct page *page,
				       struct mem_cgroup *memcg, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);

int memcg_cache_id(struct mem_cgroup *memcg);

void memcg_update_array_size(int num_groups);

struct kmem_cache *
__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);

int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);

int __memcg_cleanup_cache_params(struct kmem_cache *s);

/**
 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
 * @gfp: the gfp allocation flags.
 * @memcg: a pointer to the memcg this was charged against.
 * @order: allocation order.
 *
 * Returns true if the memcg to which the current task belongs can hold this
 * allocation.
 *
 * We return true automatically if this allocation is not to be accounted to
 * any memcg.
 */
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	if (!memcg_kmem_enabled())
		return true;

	/*
	 * __GFP_NOFAIL allocations will move on even if charging is not
	 * possible. Therefore we don't even try, and have this allocation
	 * unaccounted. We could in theory charge it with
	 * res_counter_charge_nofail, but we hope those allocations are rare,
	 * and won't be worth the trouble.
	 */
	if (gfp & __GFP_NOFAIL)
		return true;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return true;

	/* If the task is dying, just let it go. */
	if (unlikely(fatal_signal_pending(current)))
		return true;

	return __memcg_kmem_newpage_charge(gfp, memcg, order);
}

/**
 * memcg_kmem_uncharge_pages: uncharge pages from memcg
 * @page: pointer to struct page being freed
 * @order: allocation order.
 *
 * There is no need to specify the memcg here, since it is embedded in the
 * page_cgroup.
 */
static inline void
memcg_kmem_uncharge_pages(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_pages(page, order);
}

/**
 * memcg_kmem_commit_charge: embed the correct memcg in a page
 * @page: pointer to struct page recently allocated
 * @memcg: the memcg structure we charged against
 * @order: allocation order.
 *
 * Needs to be called after memcg_kmem_newpage_charge, regardless of success
 * or failure of the allocation. If @page is NULL, this function will revert
 * the charges. Otherwise, it will commit the memcg given by @memcg to the
 * corresponding page_cgroup.
 */
static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
	if (memcg_kmem_enabled() && memcg)
		__memcg_kmem_commit_charge(page, memcg, order);
}
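
/*
 * Usage sketch (illustrative only, modelled on the accounted page
 * allocation path): charge before allocating, commit afterwards (a NULL
 * page reverts the charge), and uncharge when the pages are freed:
 *
 *	struct mem_cgroup *memcg = NULL;
 *	struct page *page;
 *
 *	if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
 *		return NULL;
 *	page = alloc_pages(gfp, order);
 *	memcg_kmem_commit_charge(page, memcg, order);
 *	...
 *	memcg_kmem_uncharge_pages(page, order);
 */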

/**
 * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 * @gfp: allocation flags.
 *
 * All memory allocated from a per-memcg cache is charged to the owner memcg.
 */
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (!memcg_kmem_enabled())
		return cachep;
	if (gfp & __GFP_NOFAIL)
		return cachep;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return cachep;
	if (unlikely(fatal_signal_pending(current)))
		return cachep;

	return __memcg_kmem_get_cache(cachep, gfp);
}
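
/*
 * Usage sketch (illustrative only): slab allocators call this right before
 * allocating an object so that the object comes from, and is accounted to,
 * the current task's per-memcg cache:
 *
 *	cachep = memcg_kmem_get_cache(cachep, gfp);
 *	... allocate the object from cachep as usual ...
 */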
#else
#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	return true;
}

static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
{
}

static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	return cachep;
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */