/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <[email protected]>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);

struct memcg_scanrecord {
	struct mem_cgroup *mem;	/* scanned memory cgroup */
	struct mem_cgroup *root; /* scan target hierarchy root */
	int context;		/* scanning context (see memcontrol.c) */
	unsigned long nr_scanned[2]; /* the number of scanned pages */
	unsigned long nr_rotated[2]; /* the number of rotated pages */
	unsigned long nr_freed[2]; /* the number of freed pages */
	unsigned long elapsed; /* nsec of time elapsed while scanning */
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions should be called with a gfp_mask of GFP_KERNEL
 * or (gfp_mask & GFP_RECLAIM_MASK).  In the current implementation, memcg
 * does not allocate memory itself but reclaims memory from all available
 * zones, so the "where do I want memory from" placement bits of gfp_mask
 * have no meaning.  Any bit pattern would therefore work, but restricting
 * charge callers to GFP_KERNEL or (gfp_mask & GFP_RECLAIM_MASK) keeps the
 * code unambiguous.  (And if memcg does allocate memory in the future,
 * GFP_KERNEL remains a sane choice.)
 */
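/*
 * Illustrative sketch of the convention above (not a declaration from
 * this header): a hypothetical pagecache path charging a new page.
 * GFP_RECLAIM_MASK is defined in mm/internal.h, so real callers live in
 * mm/ code; the function name below is made up for illustration only.
 *
 *	static int example_add_to_page_cache(struct page *page,
 *					     struct mm_struct *mm,
 *					     gfp_t gfp_mask)
 *	{
 *		// Pass only the reclaim-relevant bits; node/zone
 *		// placement bits are meaningless to memcg.
 *		return mem_cgroup_cache_charge(page, mm,
 *					       gfp_mask & GFP_RECLAIM_MASK);
 *	}
 */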

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
				  enum lru_list from, enum lru_list to);

/* For coalescing uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *mem;
	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	rcu_read_unlock();
	return cgroup == mem;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage, bool migration_ok);
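/*
 * Illustrative sketch (hypothetical caller, loosely modelled on the
 * do_swap_page() fault path): the swap-in charge is a two-phase
 * protocol.  try_charge reserves the charge before the page is mapped;
 * the caller must then either commit or cancel it on every path.
 *
 *	struct mem_cgroup *ptr;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto out_fail;			// charge denied
 *	// ... lock the page, install the PTE ...
 *	if (pte_installed)
 *		mem_cgroup_commit_charge_swapin(page, ptr);
 *	else
 *		mem_cgroup_cancel_charge_swapin(ptr);	// undo reservation
 */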

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					int nid, int zid, unsigned int lru_mask);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);

extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
						  gfp_t gfp_mask, bool noswap,
						  struct memcg_scanrecord *rec);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						struct zone *zone,
						struct memcg_scanrecord *rec,
						unsigned long *nr_scanned);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *mem);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
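/*
 * Illustrative sketch (hypothetical helpers, loosely modelled on the
 * rmap code): a file page gaining its first mapping bumps the
 * MEMCG_NR_FILE_MAPPED statistic, and drops it again on last unmap.
 *
 *	void example_page_mapped(struct page *page)
 *	{
 *		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	}
 *
 *	void example_page_unmapped(struct page *page)
 *	{
 *		mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	}
 */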

#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
						   struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
}

static inline void mem_cgroup_rotate_lru_list(struct page *page,
					      enum lru_list lru)
{
}

static inline void mem_cgroup_del_lru(struct page *page)
{
}

static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}

static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			     struct mem_cgroup **ptr, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						    int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						      int priority)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			     unsigned int lru_mask)
{
	return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head,
					       struct page *tail)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif
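/*
 * Note on the stubs above: with CONFIG_CGROUP_MEM_RES_CTLR disabled,
 * every entry point collapses to a trivial static inline, so callers in
 * generic mm code never need their own #ifdefs.  An illustrative
 * (hypothetical) caller compiles unchanged in both configurations:
 *
 *	if (!mem_cgroup_disabled())
 *		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 */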

#endif /* _LINUX_MEMCONTROL_H */