/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <[email protected]>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by the kernel. */
enum mem_cgroup_page_stat_item {
        MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                                        struct list_head *dst,
                                        unsigned long *scanned, int order,
                                        int mode, struct zone *z,
                                        struct mem_cgroup *mem_cont,
                                        int active, int file);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg
 * doesn't allocate memory itself but reclaims memory from all available
 * zones, so the "where do I want memory from" bits of gfp_mask have no
 * meaning. Any bits of that field would do, but having a rule avoids
 * ambiguous code: a charge function's gfp_mask should be set to
 * GFP_KERNEL or to gfp_mask & GFP_RECLAIM_MASK.
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL
 * is sane.)
 */
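
/*
 * An illustrative sketch of the rule above (not part of this header;
 * the wrapper name is hypothetical, and GFP_RECLAIM_MASK comes from
 * mm/internal.h, so only mm-internal callers can use it). A charge site
 * either passes GFP_KERNEL directly or masks its own flags down to the
 * reclaim-relevant bits:
 *
 *      int my_charge_new_page(struct page *page, struct mm_struct *mm,
 *                             gfp_t gfp_mask)
 *      {
 *              return mem_cgroup_newpage_charge(page, mm,
 *                                      gfp_mask & GFP_RECLAIM_MASK);
 *      }
 */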

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
                struct page *page, gfp_t mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
                                        struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
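
/*
 * The three swap-in calls above form a try/commit/cancel protocol. A
 * sketch of how a fault path might drive it (map_swapped_page() is a
 * hypothetical helper, error handling condensed):
 *
 *      struct mem_cgroup *ptr;
 *      int ret;
 *
 *      ret = mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr);
 *      if (ret)
 *              return ret;
 *      if (map_swapped_page(page) == 0)
 *              mem_cgroup_commit_charge_swapin(page, ptr);
 *      else
 *              mem_cgroup_cancel_charge_swapin(ptr);
 *
 * Cancelling undoes the charge taken in the "try" step so the counter
 * is not leaked when the fault aborts.
 */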

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                        gfp_t gfp_mask);
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
                                  enum lru_list from, enum lru_list to);

/* For coalescing uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
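
/*
 * A sketch of batched uncharging (illustrative only; pages_to_free is a
 * hypothetical list of pages being released). Bracketing a run of
 * uncharges with start/end lets memcg coalesce the counter updates
 * instead of paying for them page by page:
 *
 *      struct page *page;
 *
 *      mem_cgroup_uncharge_start();
 *      list_for_each_entry(page, &pages_to_free, lru)
 *              mem_cgroup_uncharge_page(page);
 *      mem_cgroup_uncharge_end();
 */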

extern int mem_cgroup_shmem_charge_fallback(struct page *page,
                        struct mm_struct *mm, gfp_t gfp_mask);

extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
        struct mem_cgroup *mem;
        rcu_read_lock();
        mem = mem_cgroup_from_task(rcu_dereference((mm)->owner));
        rcu_read_unlock();
        return cgroup == mem;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);

extern int
mem_cgroup_prepare_migration(struct page *page,
        struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
        struct page *oldpage, struct page *newpage, bool migration_ok);

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
                                        struct zone *zone,
                                        enum lru_list lru);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
                                                      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
                                        struct task_struct *p);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
        if (mem_cgroup_subsys.disabled)
                return true;
        return false;
}

void mem_cgroup_update_page_stat(struct page *page,
                                 enum mem_cgroup_page_stat_item idx,
                                 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
        mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
        mem_cgroup_update_page_stat(page, idx, -1);
}

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *mem);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
                                        struct mm_struct *mm, gfp_t gfp_mask)
{
        return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
                                        struct mm_struct *mm, gfp_t gfp_mask)
{
        return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
                struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{
        return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
                                        struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline int mem_cgroup_shmem_charge_fallback(struct page *page,
                        struct mm_struct *mm, gfp_t gfp_mask)
{
        return 0;
}

static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
}

static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru(struct page *page)
{
}

static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
        return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
        return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
                                     const struct mem_cgroup *mem)
{
        return 1;
}

static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
        return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
        struct mem_cgroup **ptr, gfp_t gfp_mask)
{
        return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
        struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
        return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
                                                    int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
                                                      int priority)
{
}

static inline bool mem_cgroup_disabled(void)
{
        return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
        return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
        return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, struct zone *zone,
                             enum lru_list lru)
{
        return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
        return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
        return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned)
{
        return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *mem)
{
        return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head,
                                               struct page *tail)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
        return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

#endif /* _LINUX_MEMCONTROL_H */