/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <[email protected]>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by the kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					isolate_mode_t mode,
					struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions that take a gfp_mask should be called with
 * GFP_KERNEL or (gfp_mask & GFP_RECLAIM_MASK).  In the current
 * implementation, memcg doesn't allocate memory itself but reclaims
 * memory from all available zones, so the "where do I want memory from"
 * bits of gfp_mask have no meaning.  Any bits of that field would work,
 * but having a rule avoids ambiguous call sites: set a charge function's
 * gfp_mask to GFP_KERNEL or to gfp_mask & GFP_RECLAIM_MASK.
 * (Of course, if memcg does allocate memory in the future, GFP_KERNEL
 * is the sane choice.)
 */
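
/*
 * Example (illustrative sketch, not part of this API): a page-cache style
 * caller following the rule above.  example_add_to_page_cache() is a
 * hypothetical wrapper; the point is masking gfp_mask down to its reclaim
 * bits before charging:
 *
 *	static int example_add_to_page_cache(struct page *page,
 *					     struct mm_struct *mm,
 *					     gfp_t gfp_mask)
 *	{
 *		return mem_cgroup_cache_charge(page, mm,
 *					       gfp_mask & GFP_RECLAIM_MASK);
 *	}
 */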

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
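
/*
 * Example (illustrative sketch): the swap-in charge is a two-phase
 * protocol.  A fault handler first reserves the charge, then either
 * commits it once the page has been mapped or cancels it on failure.
 * example_swapin_fault() and example_map_page() are hypothetical:
 *
 *	static int example_swapin_fault(struct mm_struct *mm,
 *					struct page *page)
 *	{
 *		struct mem_cgroup *memcg;
 *
 *		if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *			return -ENOMEM;
 *		if (example_map_page(mm, page)) {
 *			mem_cgroup_cancel_charge_swapin(memcg);
 *			return -EFAULT;
 *		}
 *		mem_cgroup_commit_charge_swapin(page, memcg);
 *		return 0;
 *	}
 */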

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
				  enum lru_list from, enum lru_list to);

/* For coalescing uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
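
/*
 * Example (illustrative sketch): wrapping a loop of uncharges in
 * mem_cgroup_uncharge_start()/mem_cgroup_uncharge_end() lets memcg
 * coalesce the individual uncharges and touch its counters once per
 * batch instead of once per page.  example_release_pages() and its
 * page list are hypothetical:
 *
 *	static void example_release_pages(struct list_head *pages)
 *	{
 *		struct page *page, *next;
 *
 *		mem_cgroup_uncharge_start();
 *		list_for_each_entry_safe(page, next, pages, lru)
 *			mem_cgroup_uncharge_cache_page(page);
 *		mem_cgroup_uncharge_end();
 *	}
 */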

extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *memcg;
	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
	rcu_read_unlock();
	return cgroup == memcg;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok);

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
				    struct zone *zone);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg,
				    struct zone *zone);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, int zid,
					   unsigned int lru_mask);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
				      struct task_struct *p);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					    struct mm_struct *mm,
					    gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					  struct mm_struct *mm,
					  gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
						   struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
}

static inline void mem_cgroup_rotate_lru_list(struct page *page,
					      enum lru_list lru)
{
}

static inline void mem_cgroup_del_lru(struct page *page)
{
}

static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm,
				  struct mem_cgroup *memcg)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *memcg)
{
	return 1;
}

static inline struct cgroup_subsys_state
*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			     struct mem_cgroup **ptr, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *memcg,
						    int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *memcg,
						      int priority)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			     unsigned int lru_mask)
{
	return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head,
					       struct page *tail)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

#endif /* _LINUX_MEMCONTROL_H */