xref: /linux-6.15/include/linux/memcontrol.h (revision ed3174d9)
1 /* memcontrol.h - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <[email protected]>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <[email protected]>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19 
20 #ifndef _LINUX_MEMCONTROL_H
21 #define _LINUX_MEMCONTROL_H
22 
23 #include <linux/rcupdate.h>
24 #include <linux/mm.h>
25 
26 struct mem_cgroup;
27 struct page_cgroup;
28 struct page;
29 struct mm_struct;
30 
31 #ifdef CONFIG_CGROUP_MEM_CONT
32 
/* mm <-> cgroup lifetime: attach a new mm to its owner's cgroup / release it. */
extern void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p);
extern void mm_free_cgroup(struct mm_struct *mm);
/* Per-page accounting state: associate/look up the page's page_cgroup. */
extern void page_assign_page_cgroup(struct page *page,
					struct page_cgroup *pc);
extern struct page_cgroup *page_get_page_cgroup(struct page *page);
/* Charge a page against @mm's cgroup; returns 0 on success, non-zero
 * otherwise (error convention — verify against callers in mm/). */
extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
extern void mem_cgroup_uncharge(struct page_cgroup *pc);
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_move_lists(struct page_cgroup *pc, bool active);
/* Isolate up to @nr_to_scan pages from the cgroup's LRU for reclaim;
 * mirrors the signature style of zone-level isolate_lru_pages(). */
extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active);
extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

/*
 * True iff @cgroup is @mm's memory cgroup.  Uses rcu_dereference(), so the
 * caller presumably must be inside rcu_read_lock() — NOTE(review): confirm.
 */
#define vm_match_cgroup(mm, cgroup)	\
	((cgroup) == rcu_dereference((mm)->mem_cgroup))

/* Page migration hooks — presumably move the charge from @page to
 * @newpage; confirm against mm/memcontrol.c. */
extern int mem_cgroup_prepare_migration(struct page *page);
extern void mem_cgroup_end_migration(struct page *page);
extern void mem_cgroup_page_migration(struct page *page, struct page *newpage);

/*
 * For memory reclaim.
 */
extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem);
extern long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem);

extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem);
extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
							int priority);
extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
							int priority);

extern long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
				struct zone *zone, int priority);
extern long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
				struct zone *zone, int priority);
77 
78 #else /* CONFIG_CGROUP_MEM_CONT */
/* CONFIG_CGROUP_MEM_CONT=n: the hooks below compile away to no-ops. */
static inline void mm_init_cgroup(struct mm_struct *mm,
					struct task_struct *p)
{
}

static inline void mm_free_cgroup(struct mm_struct *mm)
{
}

static inline void page_assign_page_cgroup(struct page *page,
						struct page_cgroup *pc)
{
}

/* No per-page accounting state exists, so there is never a page_cgroup. */
static inline struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return NULL;
}
97 
/* Charging always "succeeds" (returns 0) when the controller is
 * compiled out, so call sites need no #ifdefs. */
static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_uncharge(struct page_cgroup *pc)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_move_lists(struct page_cgroup *pc,
						bool active)
{
}

static inline int mem_cgroup_cache_charge(struct page *page,
						struct mm_struct *mm,
						gfp_t gfp_mask)
{
	return 0;
}
123 
/*
 * With no controller, every mm trivially "matches" and every task is "in"
 * the cgroup (return 1), so callers take their common path unchanged.
 */
static inline int vm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}
134 
/* Migration hooks: nothing to transfer when accounting is disabled;
 * prepare reports success (0) so migration proceeds normally. */
static inline int mem_cgroup_prepare_migration(struct page *page)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct page *page)
{
}

static inline void
mem_cgroup_page_migration(struct page *page, struct page *newpage)
{
}
148 
/* Reclaim heuristics: report a neutral 0 when the controller is off. */
static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	return 0;
}
153 
/*
 * Stub for CONFIG_CGROUP_MEM_CONT=n: always report a balanced (0) state.
 *
 * Return type is long, matching the enabled-configuration prototype
 * (extern long mem_cgroup_reclaim_imbalance(...)); the previous stub
 * returned int, leaving the two configurations type-inconsistent.
 */
static inline long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
	return 0;
}
158 
/* Per-cgroup reclaim-priority bookkeeping: reads yield 0 and writes are
 * no-ops when the controller is compiled out. */
static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}
173 
/* No per-cgroup LRU when disabled: both calculations return 0. */
static inline long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
					struct zone *zone, int priority)
{
	return 0;
}

static inline long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
					struct zone *zone, int priority)
{
	return 0;
}
185 #endif /* CONFIG_CGROUP_MEM_CONT */
186 
187 #endif /* _LINUX_MEMCONTROL_H */
188 
189