1 /* memcontrol.h - Memory Controller 2 * 3 * Copyright IBM Corporation, 2007 4 * Author Balbir Singh <[email protected]> 5 * 6 * Copyright 2007 OpenVZ SWsoft Inc 7 * Author: Pavel Emelianov <[email protected]> 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License as published by 11 * the Free Software Foundation; either version 2 of the License, or 12 * (at your option) any later version. 13 * 14 * This program is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * GNU General Public License for more details. 18 */ 19 20 #ifndef _LINUX_MEMCONTROL_H 21 #define _LINUX_MEMCONTROL_H 22 #include <linux/cgroup.h> 23 struct mem_cgroup; 24 struct page_cgroup; 25 struct page; 26 struct mm_struct; 27 28 /* Stats that can be updated by kernel. */ 29 enum mem_cgroup_page_stat_item { 30 MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */ 31 }; 32 33 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, 34 struct list_head *dst, 35 unsigned long *scanned, int order, 36 int mode, struct zone *z, 37 struct mem_cgroup *mem_cont, 38 int active, int file); 39 40 #ifdef CONFIG_CGROUP_MEM_RES_CTLR 41 /* 42 * All "charge" functions with gfp_mask should use GFP_KERNEL or 43 * (gfp_mask & GFP_RECLAIM_MASK). In current implementatin, memcg doesn't 44 * alloc memory but reclaims memory from all available zones. So, "where I want 45 * memory from" bits of gfp_mask has no meaning. So any bits of that field is 46 * available but adding a rule is better. charge functions' gfp_mask should 47 * be set to GFP_KERNEL or gfp_mask & GFP_RECLAIM_MASK for avoiding ambiguous 48 * codes. 49 * (Of course, if memcg does memory allocation in future, GFP_KERNEL is sane.) 50 */ 51 52 extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm, 53 gfp_t gfp_mask); 54 /* for swap handling */ 55 extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm, 56 struct page *page, gfp_t mask, struct mem_cgroup **ptr); 57 extern void mem_cgroup_commit_charge_swapin(struct page *page, 58 struct mem_cgroup *ptr); 59 extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr); 60 61 extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, 62 gfp_t gfp_mask); 63 extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru); 64 extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru); 65 extern void mem_cgroup_rotate_reclaimable_page(struct page *page); 66 extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru); 67 extern void mem_cgroup_del_lru(struct page *page); 68 extern void mem_cgroup_move_lists(struct page *page, 69 enum lru_list from, enum lru_list to); 70 71 /* For coalescing uncharge for reducing memcg' overhead*/ 72 extern void mem_cgroup_uncharge_start(void); 73 extern void mem_cgroup_uncharge_end(void); 74 75 extern void mem_cgroup_uncharge_page(struct page *page); 76 extern void mem_cgroup_uncharge_cache_page(struct page *page); 77 extern int mem_cgroup_shmem_charge_fallback(struct page *page, 78 struct mm_struct *mm, gfp_t gfp_mask); 79 80 extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask); 81 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem); 82 83 extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); 84 extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); 85 86 static inline 87 int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) 88 { 89 struct mem_cgroup *mem; 90 rcu_read_lock(); 91 mem = mem_cgroup_from_task(rcu_dereference((mm)->owner)); 92 rcu_read_unlock(); 93 return cgroup == mem; 94 } 95 96 extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem); 97 98 extern int 99 mem_cgroup_prepare_migration(struct page *page, 100 struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask); 101 extern void mem_cgroup_end_migration(struct mem_cgroup *mem, 102 struct page *oldpage, struct page *newpage, bool migration_ok); 103 104 /* 105 * For memory reclaim. 106 */ 107 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg); 108 int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg); 109 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, 110 struct zone *zone, 111 enum lru_list lru); 112 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, 113 struct zone *zone); 114 struct zone_reclaim_stat* 115 mem_cgroup_get_reclaim_stat_from_page(struct page *page); 116 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, 117 struct task_struct *p); 118 119 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 120 extern int do_swap_account; 121 #endif 122 123 static inline bool mem_cgroup_disabled(void) 124 { 125 if (mem_cgroup_subsys.disabled) 126 return true; 127 return false; 128 } 129 130 void mem_cgroup_update_page_stat(struct page *page, 131 enum mem_cgroup_page_stat_item idx, 132 int val); 133 134 static inline void mem_cgroup_inc_page_stat(struct page *page, 135 enum mem_cgroup_page_stat_item idx) 136 { 137 mem_cgroup_update_page_stat(page, idx, 1); 138 } 139 140 static inline void mem_cgroup_dec_page_stat(struct page *page, 141 enum mem_cgroup_page_stat_item idx) 142 { 143 mem_cgroup_update_page_stat(page, idx, -1); 144 } 145 146 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 147 gfp_t gfp_mask); 148 u64 mem_cgroup_get_limit(struct mem_cgroup *mem); 149 150 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 151 void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail); 152 #endif 153 154 #else /* CONFIG_CGROUP_MEM_RES_CTLR */ 155 struct mem_cgroup; 156 157 static inline int mem_cgroup_newpage_charge(struct page *page, 158 struct mm_struct *mm, gfp_t gfp_mask) 159 { 160 return 0; 161 } 162 163 static inline int mem_cgroup_cache_charge(struct page *page, 164 struct mm_struct *mm, gfp_t gfp_mask) 165 { 166 return 0; 167 } 168 169 static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm, 170 struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr) 171 { 172 return 0; 173 } 174 175 static inline void mem_cgroup_commit_charge_swapin(struct page *page, 176 struct mem_cgroup *ptr) 177 { 178 } 179 180 static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr) 181 { 182 } 183 184 static inline void mem_cgroup_uncharge_start(void) 185 { 186 } 187 188 static inline void mem_cgroup_uncharge_end(void) 189 { 190 } 191 192 static inline void mem_cgroup_uncharge_page(struct page *page) 193 { 194 } 195 196 static inline void mem_cgroup_uncharge_cache_page(struct page *page) 197 { 198 } 199 200 static inline int mem_cgroup_shmem_charge_fallback(struct page *page, 201 struct mm_struct *mm, gfp_t gfp_mask) 202 { 203 return 0; 204 } 205 206 static inline void mem_cgroup_add_lru_list(struct page *page, int lru) 207 { 208 } 209 210 static inline void mem_cgroup_del_lru_list(struct page *page, int lru) 211 { 212 return ; 213 } 214 215 static inline inline void mem_cgroup_rotate_reclaimable_page(struct page *page) 216 { 217 return ; 218 } 219 220 static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru) 221 { 222 return ; 223 } 224 225 static inline void mem_cgroup_del_lru(struct page *page) 226 { 227 return ; 228 } 229 230 static inline void 231 mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to) 232 { 233 } 234 235 static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) 236 { 237 return NULL; 238 } 239 240 static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem) 241 { 242 return 1; 243 } 244 245 static inline int task_in_mem_cgroup(struct task_struct *task, 246 const struct mem_cgroup *mem) 247 { 248 return 1; 249 } 250 251 static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem) 252 { 253 return NULL; 254 } 255 256 static inline int 257 mem_cgroup_prepare_migration(struct page *page, struct page *newpage, 258 struct mem_cgroup **ptr, gfp_t gfp_mask) 259 { 260 return 0; 261 } 262 263 static inline void mem_cgroup_end_migration(struct mem_cgroup *mem, 264 struct page *oldpage, struct page *newpage, bool migration_ok) 265 { 266 } 267 268 static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem) 269 { 270 return 0; 271 } 272 273 static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, 274 int priority) 275 { 276 } 277 278 static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, 279 int priority) 280 { 281 } 282 283 static inline bool mem_cgroup_disabled(void) 284 { 285 return true; 286 } 287 288 static inline int 289 mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg) 290 { 291 return 1; 292 } 293 294 static inline int 295 mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg) 296 { 297 return 1; 298 } 299 300 static inline unsigned long 301 mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone, 302 enum lru_list lru) 303 { 304 return 0; 305 } 306 307 308 static inline struct zone_reclaim_stat* 309 mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone) 310 { 311 return NULL; 312 } 313 314 static inline struct zone_reclaim_stat* 315 mem_cgroup_get_reclaim_stat_from_page(struct page *page) 316 { 317 return NULL; 318 } 319 320 static inline void 321 mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) 322 { 323 } 324 325 static inline void mem_cgroup_inc_page_stat(struct page *page, 326 enum mem_cgroup_page_stat_item idx) 327 { 328 } 329 330 static inline void mem_cgroup_dec_page_stat(struct page *page, 331 enum mem_cgroup_page_stat_item idx) 332 { 333 } 334 335 static inline 336 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 337 gfp_t gfp_mask) 338 { 339 return 0; 340 } 341 342 static inline 343 u64 mem_cgroup_get_limit(struct mem_cgroup *mem) 344 { 345 return 0; 346 } 347 348 static inline void mem_cgroup_split_huge_fixup(struct page *head, 349 struct page *tail) 350 { 351 } 352 353 #endif /* CONFIG_CGROUP_MEM_CONT */ 354 355 #endif /* _LINUX_MEMCONTROL_H */ 356 357