xref: /linux-6.15/include/linux/memcontrol.h (revision f6bcbf2e)
1 /* memcontrol.h - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <[email protected]>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <[email protected]>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19 
20 #ifndef _LINUX_MEMCONTROL_H
21 #define _LINUX_MEMCONTROL_H
22 #include <linux/cgroup.h>
23 #include <linux/vm_event_item.h>
24 #include <linux/hardirq.h>
25 #include <linux/jump_label.h>
26 #include <linux/page_counter.h>
27 #include <linux/vmpressure.h>
28 #include <linux/eventfd.h>
29 #include <linux/mm.h>
30 #include <linux/vmstat.h>
31 #include <linux/writeback.h>
32 #include <linux/page-flags.h>
33 
34 struct mem_cgroup;
35 struct page;
36 struct mm_struct;
37 struct kmem_cache;
38 
39 /* Cgroup-specific page state, on top of universal node page state */
40 enum memcg_stat_item {
41 	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
42 	MEMCG_RSS,
43 	MEMCG_RSS_HUGE,
44 	MEMCG_SWAP,
45 	MEMCG_SOCK,
46 	/* XXX: why are these zone and not node counters? */
47 	MEMCG_KERNEL_STACK_KB,
48 	MEMCG_NR_STAT,
49 };
50 
51 /* Cgroup-specific events, on top of universal VM events */
52 enum memcg_event_item {
53 	MEMCG_LOW = NR_VM_EVENT_ITEMS,
54 	MEMCG_HIGH,
55 	MEMCG_MAX,
56 	MEMCG_OOM,
57 	MEMCG_NR_EVENTS,
58 };
59 
60 struct mem_cgroup_reclaim_cookie {
61 	pg_data_t *pgdat;
62 	int priority;
63 	unsigned int generation;
64 };
65 
66 #ifdef CONFIG_MEMCG
67 
68 #define MEM_CGROUP_ID_SHIFT	16
69 #define MEM_CGROUP_ID_MAX	USHRT_MAX
70 
71 struct mem_cgroup_id {
72 	int id;
73 	atomic_t ref;
74 };
75 
76 /*
77  * The per-memcg event counter is incremented on every pagein/pageout. With
78  * THP, it is incremented by the number of pages. This counter is used to
79  * trigger periodic events. This is straightforward and better
80  * than using jiffies etc. to handle periodic memcg events.
81  */
82 enum mem_cgroup_events_target {
83 	MEM_CGROUP_TARGET_THRESH,
84 	MEM_CGROUP_TARGET_SOFTLIMIT,
85 	MEM_CGROUP_TARGET_NUMAINFO,
86 	MEM_CGROUP_NTARGETS,
87 };
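/*
 * A minimal sketch of how one of these targets is consumed (the real
 * ratelimiting lives in mm/memcontrol.c; THRESHOLDS_EVENTS_TARGET below
 * is illustrative only):
 *
 *	val = __this_cpu_read(memcg->stat_cpu->nr_page_events);
 *	next = __this_cpu_read(memcg->stat_cpu->targets[MEM_CGROUP_TARGET_THRESH]);
 *	if ((long)(next - val) < 0) {
 *		__this_cpu_write(memcg->stat_cpu->targets[MEM_CGROUP_TARGET_THRESH],
 *				 val + THRESHOLDS_EVENTS_TARGET);
 *		... recheck the usage thresholds and notify their eventfds ...
 *	}
 */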
88 
89 struct mem_cgroup_stat_cpu {
90 	long count[MEMCG_NR_STAT];
91 	unsigned long events[MEMCG_NR_EVENTS];
92 	unsigned long nr_page_events;
93 	unsigned long targets[MEM_CGROUP_NTARGETS];
94 };
95 
96 struct mem_cgroup_reclaim_iter {
97 	struct mem_cgroup *position;
98 	/* scan generation, increased every round-trip */
99 	unsigned int generation;
100 };
101 
102 struct lruvec_stat {
103 	long count[NR_VM_NODE_STAT_ITEMS];
104 };
105 
106 /*
107  * Per-node information in the memory controller.
108  */
109 struct mem_cgroup_per_node {
110 	struct lruvec		lruvec;
111 
112 	struct lruvec_stat __percpu *lruvec_stat_cpu;
113 	atomic_long_t		lruvec_stat[NR_VM_NODE_STAT_ITEMS];
114 
115 	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
116 
117 	struct mem_cgroup_reclaim_iter	iter[DEF_PRIORITY + 1];
118 
119 	struct rb_node		tree_node;	/* RB tree node */
120 	unsigned long		usage_in_excess;/* Set to the value by which */
121 						/* the soft limit is exceeded*/
122 	bool			on_tree;
123 	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
124 						/* use container_of	   */
125 };
126 
127 struct mem_cgroup_threshold {
128 	struct eventfd_ctx *eventfd;
129 	unsigned long threshold;
130 };
131 
132 /* For threshold */
133 struct mem_cgroup_threshold_ary {
134 	/* Index of the threshold just below or equal to the current usage. */
135 	int current_threshold;
136 	/* Size of entries[] */
137 	unsigned int size;
138 	/* Array of thresholds */
139 	struct mem_cgroup_threshold entries[];
140 };
141 
142 struct mem_cgroup_thresholds {
143 	/* Primary thresholds array */
144 	struct mem_cgroup_threshold_ary *primary;
145 	/*
146 	 * Spare threshold array.
147 	 * This is needed to make mem_cgroup_unregister_event() "never fail".
148 	 * It must be able to store at least primary->size - 1 entries.
149 	 */
150 	struct mem_cgroup_threshold_ary *spare;
151 };
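/*
 * When usage changes, the threshold code walks outward from
 * ->current_threshold and signals every eventfd whose threshold has been
 * crossed. A condensed sketch of that walk (cf. __mem_cgroup_threshold()
 * in mm/memcontrol.c):
 *
 *	i = t->current_threshold;
 *	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
 *		eventfd_signal(t->entries[i].eventfd, 1);
 *	i++;
 *	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
 *		eventfd_signal(t->entries[i].eventfd, 1);
 *	t->current_threshold = i - 1;
 */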
152 
153 enum memcg_kmem_state {
154 	KMEM_NONE,
155 	KMEM_ALLOCATED,
156 	KMEM_ONLINE,
157 };
158 
159 /*
160  * The memory controller data structure. The memory controller controls both
161  * page cache and RSS per cgroup. We would eventually like to provide
162  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
163  * to help the administrator determine what knobs to tune.
164  */
165 struct mem_cgroup {
166 	struct cgroup_subsys_state css;
167 
168 	/* Private memcg ID. Used to ID objects that outlive the cgroup */
169 	struct mem_cgroup_id id;
170 
171 	/* Accounted resources */
172 	struct page_counter memory;
173 	struct page_counter swap;
174 
175 	/* Legacy consumer-oriented counters */
176 	struct page_counter memsw;
177 	struct page_counter kmem;
178 	struct page_counter tcpmem;
179 
180 	/* Normal memory consumption range */
181 	unsigned long low;
182 	unsigned long high;
183 
184 	/* Range enforcement for interrupt charges */
185 	struct work_struct high_work;
186 
187 	unsigned long soft_limit;
188 
189 	/* vmpressure notifications */
190 	struct vmpressure vmpressure;
191 
192 	/*
193 	 * Should the accounting and control be hierarchical, per subtree?
194 	 */
195 	bool use_hierarchy;
196 
197 	/* protected by memcg_oom_lock */
198 	bool		oom_lock;
199 	int		under_oom;
200 
201 	int	swappiness;
202 	/* OOM-Killer disable */
203 	int		oom_kill_disable;
204 
205 	/* handle for "memory.events" */
206 	struct cgroup_file events_file;
207 
208 	/* protect arrays of thresholds */
209 	struct mutex thresholds_lock;
210 
211 	/* thresholds for memory usage. RCU-protected */
212 	struct mem_cgroup_thresholds thresholds;
213 
214 	/* thresholds for mem+swap usage. RCU-protected */
215 	struct mem_cgroup_thresholds memsw_thresholds;
216 
217 	/* For oom notifier event fd */
218 	struct list_head oom_notify;
219 
220 	/*
221 	 * Should we move charges of a task when a task is moved into this
222 	 * mem_cgroup? And what types of charges should we move?
223 	 */
224 	unsigned long move_charge_at_immigrate;
225 	/*
226 	 * Set to > 0 while pages under this cgroup are being moved to another cgroup.
227 	 */
228 	atomic_t		moving_account;
229 	/* taken only while moving_account > 0 */
230 	spinlock_t		move_lock;
231 	struct task_struct	*move_lock_task;
232 	unsigned long		move_lock_flags;
233 
234 	struct mem_cgroup_stat_cpu __percpu *stat_cpu;
235 	atomic_long_t		stat[MEMCG_NR_STAT];
236 	atomic_long_t		events[MEMCG_NR_EVENTS];
237 
238 	unsigned long		socket_pressure;
239 
240 	/* Legacy tcp memory accounting */
241 	bool			tcpmem_active;
242 	int			tcpmem_pressure;
243 
244 #ifndef CONFIG_SLOB
245 	/* Index in the kmem_cache->memcg_params.memcg_caches array */
246 	int kmemcg_id;
247 	enum memcg_kmem_state kmem_state;
248 	struct list_head kmem_caches;
249 #endif
250 
251 	int last_scanned_node;
252 #if MAX_NUMNODES > 1
253 	nodemask_t	scan_nodes;
254 	atomic_t	numainfo_events;
255 	atomic_t	numainfo_updating;
256 #endif
257 
258 #ifdef CONFIG_CGROUP_WRITEBACK
259 	struct list_head cgwb_list;
260 	struct wb_domain cgwb_domain;
261 #endif
262 
263 	/* List of events which userspace want to receive */
264 	struct list_head event_list;
265 	spinlock_t event_list_lock;
266 
267 	struct mem_cgroup_per_node *nodeinfo[];
268 	/* WARNING: nodeinfo must be the last member here */
269 };
270 
271 /*
272  * Size of the first charge trial. "32" comes from vmscan.c's magic value.
273  * TODO: it may be necessary to use bigger numbers on big iron.
274  */
275 #define MEMCG_CHARGE_BATCH 32U
276 
277 extern struct mem_cgroup *root_mem_cgroup;
278 
279 static inline bool mem_cgroup_disabled(void)
280 {
281 	return !cgroup_subsys_enabled(memory_cgrp_subsys);
282 }
283 
284 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
285 
286 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
287 			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
288 			  bool compound);
289 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
290 			      bool lrucare, bool compound);
291 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
292 		bool compound);
293 void mem_cgroup_uncharge(struct page *page);
294 void mem_cgroup_uncharge_list(struct list_head *page_list);
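/*
 * Charging is a two-phase operation: mem_cgroup_try_charge() reserves the
 * memory, and the caller either commits once the page has been safely
 * installed or cancels on failure. A minimal fault-path sketch (error
 * handling condensed):
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
 *		return VM_FAULT_OOM;
 *	... map the page into the page tables or add it to the page cache ...
 *	if (failed) {
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *		return error;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 *
 * mem_cgroup_uncharge() runs much later, when the page is finally freed.
 */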
295 
296 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
297 
298 static inline struct mem_cgroup_per_node *
299 mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
300 {
301 	return memcg->nodeinfo[nid];
302 }
303 
304 /**
305  * mem_cgroup_lruvec - get the lru list vector for a node or a memcg node
306  * @pgdat: pglist_data of the wanted lruvec
307  * @memcg: memcg of the wanted lruvec
308  *
309  * Returns the lru list vector holding pages for a given @pgdat or for the
310  * given @memcg on that node. This can be the node lruvec, if the memory
311  * controller is disabled.
312  */
313 static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
314 				struct mem_cgroup *memcg)
315 {
316 	struct mem_cgroup_per_node *mz;
317 	struct lruvec *lruvec;
318 
319 	if (mem_cgroup_disabled()) {
320 		lruvec = node_lruvec(pgdat);
321 		goto out;
322 	}
323 
324 	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
325 	lruvec = &mz->lruvec;
326 out:
327 	/*
328 	 * Since a node can be onlined after the mem_cgroup was created,
329 	 * we have to be prepared to initialize lruvec->pgdat here;
330 	 * and if offlined then reonlined, we need to reinitialize it.
331 	 */
332 	if (unlikely(lruvec->pgdat != pgdat))
333 		lruvec->pgdat = pgdat;
334 	return lruvec;
335 }
336 
337 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
338 
339 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
340 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
341 
342 static inline
343 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
344 	return css ? container_of(css, struct mem_cgroup, css) : NULL;
345 }
346 
347 #define mem_cgroup_from_counter(counter, member)	\
348 	container_of(counter, struct mem_cgroup, member)
349 
350 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
351 				   struct mem_cgroup *,
352 				   struct mem_cgroup_reclaim_cookie *);
353 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
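/*
 * Hierarchy walk sketch, as used by reclaim (cf. shrink_node() in
 * mm/vmscan.c); the cookie lets concurrent reclaimers share one iterator
 * position per priority level:
 *
 *	struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, &reclaim);
 *	do {
 *		... scan the lruvecs of @memcg ...
 *	} while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
 *
 * Bail out of the walk early with mem_cgroup_iter_break(root, memcg) so
 * the reference held on the current position is dropped.
 */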
354 int mem_cgroup_scan_tasks(struct mem_cgroup *,
355 			  int (*)(struct task_struct *, void *), void *);
356 
357 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
358 {
359 	if (mem_cgroup_disabled())
360 		return 0;
361 
362 	return memcg->id.id;
363 }
364 struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
365 
366 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
367 {
368 	struct mem_cgroup_per_node *mz;
369 
370 	if (mem_cgroup_disabled())
371 		return NULL;
372 
373 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
374 	return mz->memcg;
375 }
376 
377 /**
378  * parent_mem_cgroup - find the accounting parent of a memcg
379  * @memcg: memcg whose parent to find
380  *
381  * Returns the parent memcg, or NULL if this is the root or the memory
382  * controller is in legacy no-hierarchy mode.
383  */
384 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
385 {
386 	if (!memcg->memory.parent)
387 		return NULL;
388 	return mem_cgroup_from_counter(memcg->memory.parent, memory);
389 }
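/*
 * A common use is walking towards the root to test a condition anywhere up
 * the accounting hierarchy (see mem_cgroup_under_socket_pressure() below
 * for a real instance of this pattern; condition() is illustrative):
 *
 *	for (; memcg; memcg = parent_mem_cgroup(memcg))
 *		if (condition(memcg))
 *			return true;
 *	return false;
 */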
390 
391 static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
392 			      struct mem_cgroup *root)
393 {
394 	if (root == memcg)
395 		return true;
396 	if (!root->use_hierarchy)
397 		return false;
398 	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
399 }
400 
401 static inline bool mm_match_cgroup(struct mm_struct *mm,
402 				   struct mem_cgroup *memcg)
403 {
404 	struct mem_cgroup *task_memcg;
405 	bool match = false;
406 
407 	rcu_read_lock();
408 	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
409 	if (task_memcg)
410 		match = mem_cgroup_is_descendant(task_memcg, memcg);
411 	rcu_read_unlock();
412 	return match;
413 }
414 
415 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
416 ino_t page_cgroup_ino(struct page *page);
417 
418 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
419 {
420 	if (mem_cgroup_disabled())
421 		return true;
422 	return !!(memcg->css.flags & CSS_ONLINE);
423 }
424 
425 /*
426  * For memory reclaim.
427  */
428 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
429 
430 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
431 		int zid, int nr_pages);
432 
433 unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
434 					   int nid, unsigned int lru_mask);
435 
436 static inline
437 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
438 {
439 	struct mem_cgroup_per_node *mz;
440 	unsigned long nr_pages = 0;
441 	int zid;
442 
443 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
444 	for (zid = 0; zid < MAX_NR_ZONES; zid++)
445 		nr_pages += mz->lru_zone_size[zid][lru];
446 	return nr_pages;
447 }
448 
449 static inline
450 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
451 		enum lru_list lru, int zone_idx)
452 {
453 	struct mem_cgroup_per_node *mz;
454 
455 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
456 	return mz->lru_zone_size[zone_idx][lru];
457 }
458 
459 void mem_cgroup_handle_over_high(void);
460 
461 unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg);
462 
463 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
464 				struct task_struct *p);
465 
466 static inline void mem_cgroup_oom_enable(void)
467 {
468 	WARN_ON(current->memcg_may_oom);
469 	current->memcg_may_oom = 1;
470 }
471 
472 static inline void mem_cgroup_oom_disable(void)
473 {
474 	WARN_ON(!current->memcg_may_oom);
475 	current->memcg_may_oom = 0;
476 }
477 
478 static inline bool task_in_memcg_oom(struct task_struct *p)
479 {
480 	return p->memcg_in_oom;
481 }
482 
483 bool mem_cgroup_oom_synchronize(bool wait);
484 
485 #ifdef CONFIG_MEMCG_SWAP
486 extern int do_swap_account;
487 #endif
488 
489 struct mem_cgroup *lock_page_memcg(struct page *page);
490 void __unlock_page_memcg(struct mem_cgroup *memcg);
491 void unlock_page_memcg(struct page *page);
492 
493 /* idx can be of type enum memcg_stat_item or node_stat_item */
494 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
495 					     int idx)
496 {
497 	long x = atomic_long_read(&memcg->stat[idx]);
498 #ifdef CONFIG_SMP
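	/*
	 * Per-CPU deltas are folded into the atomic counter only once they
	 * exceed MEMCG_CHARGE_BATCH, so a reader can momentarily observe a
	 * negative sum; clamp it instead of reporting nonsense.
	 */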
499 	if (x < 0)
500 		x = 0;
501 #endif
502 	return x;
503 }
504 
505 /* idx can be of type enum memcg_stat_item or node_stat_item */
506 static inline void __mod_memcg_state(struct mem_cgroup *memcg,
507 				     int idx, int val)
508 {
509 	long x;
510 
511 	if (mem_cgroup_disabled())
512 		return;
513 
514 	x = val + __this_cpu_read(memcg->stat_cpu->count[idx]);
515 	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
516 		atomic_long_add(x, &memcg->stat[idx]);
517 		x = 0;
518 	}
519 	__this_cpu_write(memcg->stat_cpu->count[idx], x);
520 }
521 
522 /* idx can be of type enum memcg_stat_item or node_stat_item */
523 static inline void mod_memcg_state(struct mem_cgroup *memcg,
524 				   int idx, int val)
525 {
526 	unsigned long flags;
527 
528 	local_irq_save(flags);
529 	__mod_memcg_state(memcg, idx, val);
530 	local_irq_restore(flags);
531 }
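/*
 * As elsewhere in this file, the double-underscore variants assume the
 * caller has already disabled interrupts (or is otherwise serialized
 * against the interrupt-context updaters); the plain variants provide
 * that protection themselves via local_irq_save()/local_irq_restore().
 */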
532 
533 /**
534  * mod_memcg_page_state - update page state statistics
535  * @page: the page
536  * @idx: page state item to account
537  * @val: number of pages (positive or negative)
538  *
539  * The @page must be locked or the caller must use lock_page_memcg()
540  * to prevent double accounting when the page is concurrently being
541  * moved to another memcg:
542  *
543  *   lock_page(page) or lock_page_memcg(page)
544  *   if (TestClearPageState(page))
545  *     mod_memcg_page_state(page, state, -1);
546  *   unlock_page(page) or unlock_page_memcg(page)
547  *
548  * Kernel pages are an exception to this, since they'll never move.
549  */
550 static inline void __mod_memcg_page_state(struct page *page,
551 					  int idx, int val)
552 {
553 	if (page->mem_cgroup)
554 		__mod_memcg_state(page->mem_cgroup, idx, val);
555 }
556 
557 static inline void mod_memcg_page_state(struct page *page,
558 					int idx, int val)
559 {
560 	if (page->mem_cgroup)
561 		mod_memcg_state(page->mem_cgroup, idx, val);
562 }
563 
564 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
565 					      enum node_stat_item idx)
566 {
567 	struct mem_cgroup_per_node *pn;
568 	long x;
569 
570 	if (mem_cgroup_disabled())
571 		return node_page_state(lruvec_pgdat(lruvec), idx);
572 
573 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
574 	x = atomic_long_read(&pn->lruvec_stat[idx]);
575 #ifdef CONFIG_SMP
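	/* Batched per-CPU updates can lag; see the comment in memcg_page_state(). */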
576 	if (x < 0)
577 		x = 0;
578 #endif
579 	return x;
580 }
581 
582 static inline void __mod_lruvec_state(struct lruvec *lruvec,
583 				      enum node_stat_item idx, int val)
584 {
585 	struct mem_cgroup_per_node *pn;
586 	long x;
587 
588 	/* Update node */
589 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
590 
591 	if (mem_cgroup_disabled())
592 		return;
593 
594 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
595 
596 	/* Update memcg */
597 	__mod_memcg_state(pn->memcg, idx, val);
598 
599 	/* Update lruvec */
600 	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
601 	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
602 		atomic_long_add(x, &pn->lruvec_stat[idx]);
603 		x = 0;
604 	}
605 	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
606 }
607 
608 static inline void mod_lruvec_state(struct lruvec *lruvec,
609 				    enum node_stat_item idx, int val)
610 {
611 	unsigned long flags;
612 
613 	local_irq_save(flags);
614 	__mod_lruvec_state(lruvec, idx, val);
615 	local_irq_restore(flags);
616 }
617 
618 static inline void __mod_lruvec_page_state(struct page *page,
619 					   enum node_stat_item idx, int val)
620 {
621 	pg_data_t *pgdat = page_pgdat(page);
622 	struct lruvec *lruvec;
623 
624 	/* Untracked pages have no memcg, no lruvec. Update only the node */
625 	if (!page->mem_cgroup) {
626 		__mod_node_page_state(pgdat, idx, val);
627 		return;
628 	}
629 
630 	lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
631 	__mod_lruvec_state(lruvec, idx, val);
632 }
633 
634 static inline void mod_lruvec_page_state(struct page *page,
635 					 enum node_stat_item idx, int val)
636 {
637 	unsigned long flags;
638 
639 	local_irq_save(flags);
640 	__mod_lruvec_page_state(page, idx, val);
641 	local_irq_restore(flags);
642 }
643 
644 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
645 						gfp_t gfp_mask,
646 						unsigned long *total_scanned);
647 
648 /* idx can be of type enum memcg_event_item or vm_event_item */
649 static inline void __count_memcg_events(struct mem_cgroup *memcg,
650 					int idx, unsigned long count)
651 {
652 	unsigned long x;
653 
654 	if (mem_cgroup_disabled())
655 		return;
656 
657 	x = count + __this_cpu_read(memcg->stat_cpu->events[idx]);
658 	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
659 		atomic_long_add(x, &memcg->events[idx]);
660 		x = 0;
661 	}
662 	__this_cpu_write(memcg->stat_cpu->events[idx], x);
663 }
664 
665 static inline void count_memcg_events(struct mem_cgroup *memcg,
666 				      int idx, unsigned long count)
667 {
668 	unsigned long flags;
669 
670 	local_irq_save(flags);
671 	__count_memcg_events(memcg, idx, count);
672 	local_irq_restore(flags);
673 }
674 
675 /* idx can be of type enum memcg_event_item or vm_event_item */
676 static inline void count_memcg_page_event(struct page *page,
677 					  int idx)
678 {
679 	if (page->mem_cgroup)
680 		count_memcg_events(page->mem_cgroup, idx, 1);
681 }
682 
683 static inline void count_memcg_event_mm(struct mm_struct *mm,
684 					enum vm_event_item idx)
685 {
686 	struct mem_cgroup *memcg;
687 
688 	if (mem_cgroup_disabled())
689 		return;
690 
691 	rcu_read_lock();
692 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
693 	if (likely(memcg)) {
694 		count_memcg_events(memcg, idx, 1);
695 		if (idx == OOM_KILL)
696 			cgroup_file_notify(&memcg->events_file);
697 	}
698 	rcu_read_unlock();
699 }
700 
701 static inline void mem_cgroup_event(struct mem_cgroup *memcg,
702 				    enum memcg_event_item event)
703 {
704 	count_memcg_events(memcg, event, 1);
705 	cgroup_file_notify(&memcg->events_file);
706 }
707 
708 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
709 void mem_cgroup_split_huge_fixup(struct page *head);
710 #endif
711 
712 #else /* CONFIG_MEMCG */
713 
714 #define MEM_CGROUP_ID_SHIFT	0
715 #define MEM_CGROUP_ID_MAX	0
716 
717 struct mem_cgroup;
718 
719 static inline bool mem_cgroup_disabled(void)
720 {
721 	return true;
722 }
723 
724 static inline void mem_cgroup_event(struct mem_cgroup *memcg,
725 				    enum memcg_event_item event)
726 {
727 }
728 
729 static inline bool mem_cgroup_low(struct mem_cgroup *root,
730 				  struct mem_cgroup *memcg)
731 {
732 	return false;
733 }
734 
735 static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
736 					gfp_t gfp_mask,
737 					struct mem_cgroup **memcgp,
738 					bool compound)
739 {
740 	*memcgp = NULL;
741 	return 0;
742 }
743 
744 static inline void mem_cgroup_commit_charge(struct page *page,
745 					    struct mem_cgroup *memcg,
746 					    bool lrucare, bool compound)
747 {
748 }
749 
750 static inline void mem_cgroup_cancel_charge(struct page *page,
751 					    struct mem_cgroup *memcg,
752 					    bool compound)
753 {
754 }
755 
756 static inline void mem_cgroup_uncharge(struct page *page)
757 {
758 }
759 
760 static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
761 {
762 }
763 
764 static inline void mem_cgroup_migrate(struct page *old, struct page *new)
765 {
766 }
767 
768 static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
769 				struct mem_cgroup *memcg)
770 {
771 	return node_lruvec(pgdat);
772 }
773 
774 static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
775 						    struct pglist_data *pgdat)
776 {
777 	return &pgdat->lruvec;
778 }
779 
780 static inline bool mm_match_cgroup(struct mm_struct *mm,
781 		struct mem_cgroup *memcg)
782 {
783 	return true;
784 }
785 
786 static inline bool task_in_mem_cgroup(struct task_struct *task,
787 				      const struct mem_cgroup *memcg)
788 {
789 	return true;
790 }
791 
792 static inline struct mem_cgroup *
793 mem_cgroup_iter(struct mem_cgroup *root,
794 		struct mem_cgroup *prev,
795 		struct mem_cgroup_reclaim_cookie *reclaim)
796 {
797 	return NULL;
798 }
799 
800 static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
801 					 struct mem_cgroup *prev)
802 {
803 }
804 
805 static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
806 		int (*fn)(struct task_struct *, void *), void *arg)
807 {
808 	return 0;
809 }
810 
811 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
812 {
813 	return 0;
814 }
815 
816 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
817 {
818 	WARN_ON_ONCE(id);
819 	/* XXX: This should always return root_mem_cgroup */
820 	return NULL;
821 }
822 
823 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
824 {
825 	return NULL;
826 }
827 
828 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
829 {
830 	return true;
831 }
832 
833 static inline unsigned long
834 mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
835 {
836 	return 0;
837 }
838 static inline
839 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
840 		enum lru_list lru, int zone_idx)
841 {
842 	return 0;
843 }
844 
845 static inline unsigned long
846 mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
847 			     int nid, unsigned int lru_mask)
848 {
849 	return 0;
850 }
851 
852 static inline unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
853 {
854 	return 0;
855 }
856 
857 static inline void
858 mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
859 {
860 }
861 
862 static inline struct mem_cgroup *lock_page_memcg(struct page *page)
863 {
864 	return NULL;
865 }
866 
867 static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
868 {
869 }
870 
871 static inline void unlock_page_memcg(struct page *page)
872 {
873 }
874 
875 static inline void mem_cgroup_handle_over_high(void)
876 {
877 }
878 
879 static inline void mem_cgroup_oom_enable(void)
880 {
881 }
882 
883 static inline void mem_cgroup_oom_disable(void)
884 {
885 }
886 
887 static inline bool task_in_memcg_oom(struct task_struct *p)
888 {
889 	return false;
890 }
891 
892 static inline bool mem_cgroup_oom_synchronize(bool wait)
893 {
894 	return false;
895 }
896 
897 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
898 					     int idx)
899 {
900 	return 0;
901 }
902 
903 static inline void __mod_memcg_state(struct mem_cgroup *memcg,
904 				     int idx,
905 				     int nr)
906 {
907 }
908 
909 static inline void mod_memcg_state(struct mem_cgroup *memcg,
910 				   int idx,
911 				   int nr)
912 {
913 }
914 
915 static inline void __mod_memcg_page_state(struct page *page,
916 					  int idx,
917 					  int nr)
918 {
919 }
920 
921 static inline void mod_memcg_page_state(struct page *page,
922 					int idx,
923 					int nr)
924 {
925 }
926 
927 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
928 					      enum node_stat_item idx)
929 {
930 	return node_page_state(lruvec_pgdat(lruvec), idx);
931 }
932 
933 static inline void __mod_lruvec_state(struct lruvec *lruvec,
934 				      enum node_stat_item idx, int val)
935 {
936 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
937 }
938 
939 static inline void mod_lruvec_state(struct lruvec *lruvec,
940 				    enum node_stat_item idx, int val)
941 {
942 	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
943 }
944 
945 static inline void __mod_lruvec_page_state(struct page *page,
946 					   enum node_stat_item idx, int val)
947 {
948 	__mod_node_page_state(page_pgdat(page), idx, val);
949 }
950 
951 static inline void mod_lruvec_page_state(struct page *page,
952 					 enum node_stat_item idx, int val)
953 {
954 	mod_node_page_state(page_pgdat(page), idx, val);
955 }
956 
957 static inline
958 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
959 					    gfp_t gfp_mask,
960 					    unsigned long *total_scanned)
961 {
962 	return 0;
963 }
964 
965 static inline void mem_cgroup_split_huge_fixup(struct page *head)
966 {
967 }
968 
969 static inline void count_memcg_events(struct mem_cgroup *memcg,
970 				      enum vm_event_item idx,
971 				      unsigned long count)
972 {
973 }
974 
975 static inline void count_memcg_page_event(struct page *page,
976 					  int idx)
977 {
978 }
979 
980 static inline
981 void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
982 {
983 }
984 #endif /* CONFIG_MEMCG */
985 
986 /* idx can be of type enum memcg_stat_item or node_stat_item */
987 static inline void __inc_memcg_state(struct mem_cgroup *memcg,
988 				     int idx)
989 {
990 	__mod_memcg_state(memcg, idx, 1);
991 }
992 
993 /* idx can be of type enum memcg_stat_item or node_stat_item */
994 static inline void __dec_memcg_state(struct mem_cgroup *memcg,
995 				     int idx)
996 {
997 	__mod_memcg_state(memcg, idx, -1);
998 }
999 
1000 /* idx can be of type enum memcg_stat_item or node_stat_item */
1001 static inline void __inc_memcg_page_state(struct page *page,
1002 					  int idx)
1003 {
1004 	__mod_memcg_page_state(page, idx, 1);
1005 }
1006 
1007 /* idx can be of type enum memcg_stat_item or node_stat_item */
1008 static inline void __dec_memcg_page_state(struct page *page,
1009 					  int idx)
1010 {
1011 	__mod_memcg_page_state(page, idx, -1);
1012 }
1013 
1014 static inline void __inc_lruvec_state(struct lruvec *lruvec,
1015 				      enum node_stat_item idx)
1016 {
1017 	__mod_lruvec_state(lruvec, idx, 1);
1018 }
1019 
1020 static inline void __dec_lruvec_state(struct lruvec *lruvec,
1021 				      enum node_stat_item idx)
1022 {
1023 	__mod_lruvec_state(lruvec, idx, -1);
1024 }
1025 
1026 static inline void __inc_lruvec_page_state(struct page *page,
1027 					   enum node_stat_item idx)
1028 {
1029 	__mod_lruvec_page_state(page, idx, 1);
1030 }
1031 
1032 static inline void __dec_lruvec_page_state(struct page *page,
1033 					   enum node_stat_item idx)
1034 {
1035 	__mod_lruvec_page_state(page, idx, -1);
1036 }
1037 
1038 /* idx can be of type enum memcg_stat_item or node_stat_item */
1039 static inline void inc_memcg_state(struct mem_cgroup *memcg,
1040 				   int idx)
1041 {
1042 	mod_memcg_state(memcg, idx, 1);
1043 }
1044 
1045 /* idx can be of type enum memcg_stat_item or node_stat_item */
1046 static inline void dec_memcg_state(struct mem_cgroup *memcg,
1047 				   int idx)
1048 {
1049 	mod_memcg_state(memcg, idx, -1);
1050 }
1051 
1052 /* idx can be of type enum memcg_stat_item or node_stat_item */
1053 static inline void inc_memcg_page_state(struct page *page,
1054 					int idx)
1055 {
1056 	mod_memcg_page_state(page, idx, 1);
1057 }
1058 
1059 /* idx can be of type enum memcg_stat_item or node_stat_item */
1060 static inline void dec_memcg_page_state(struct page *page,
1061 					int idx)
1062 {
1063 	mod_memcg_page_state(page, idx, -1);
1064 }
1065 
1066 static inline void inc_lruvec_state(struct lruvec *lruvec,
1067 				    enum node_stat_item idx)
1068 {
1069 	mod_lruvec_state(lruvec, idx, 1);
1070 }
1071 
1072 static inline void dec_lruvec_state(struct lruvec *lruvec,
1073 				    enum node_stat_item idx)
1074 {
1075 	mod_lruvec_state(lruvec, idx, -1);
1076 }
1077 
1078 static inline void inc_lruvec_page_state(struct page *page,
1079 					 enum node_stat_item idx)
1080 {
1081 	mod_lruvec_page_state(page, idx, 1);
1082 }
1083 
1084 static inline void dec_lruvec_page_state(struct page *page,
1085 					 enum node_stat_item idx)
1086 {
1087 	mod_lruvec_page_state(page, idx, -1);
1088 }
1089 
1090 #ifdef CONFIG_CGROUP_WRITEBACK
1091 
1092 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
1093 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
1094 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
1095 			 unsigned long *pheadroom, unsigned long *pdirty,
1096 			 unsigned long *pwriteback);
1097 
1098 #else	/* CONFIG_CGROUP_WRITEBACK */
1099 
1100 static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
1101 {
1102 	return NULL;
1103 }
1104 
1105 static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
1106 				       unsigned long *pfilepages,
1107 				       unsigned long *pheadroom,
1108 				       unsigned long *pdirty,
1109 				       unsigned long *pwriteback)
1110 {
1111 }
1112 
1113 #endif	/* CONFIG_CGROUP_WRITEBACK */
1114 
1115 struct sock;
1116 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1117 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1118 #ifdef CONFIG_MEMCG
1119 extern struct static_key_false memcg_sockets_enabled_key;
1120 #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
1121 void mem_cgroup_sk_alloc(struct sock *sk);
1122 void mem_cgroup_sk_free(struct sock *sk);
1123 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1124 {
1125 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
1126 		return true;
1127 	do {
1128 		if (time_before(jiffies, memcg->socket_pressure))
1129 			return true;
1130 	} while ((memcg = parent_mem_cgroup(memcg)));
1131 	return false;
1132 }
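/*
 * Networking consults this via sk_under_memory_pressure(); a condensed
 * sketch of that caller (cf. include/net/sock.h):
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *		return true;
 */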
1133 #else
1134 #define mem_cgroup_sockets_enabled 0
1135 static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
1136 static inline void mem_cgroup_sk_free(struct sock *sk) { }
1137 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1138 {
1139 	return false;
1140 }
1141 #endif
1142 
1143 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
1144 void memcg_kmem_put_cache(struct kmem_cache *cachep);
1145 int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
1146 			    struct mem_cgroup *memcg);
1147 int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
1148 void memcg_kmem_uncharge(struct page *page, int order);
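/*
 * A condensed sketch of how the page allocator accounts __GFP_ACCOUNT
 * allocations with these hooks (error handling abbreviated):
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) &&
 *	    memcg_kmem_charge(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *
 * The matching memcg_kmem_uncharge(page, order) runs when the page is freed.
 */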
1149 
1150 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
1151 extern struct static_key_false memcg_kmem_enabled_key;
1152 extern struct workqueue_struct *memcg_kmem_cache_wq;
1153 
1154 extern int memcg_nr_cache_ids;
1155 void memcg_get_cache_ids(void);
1156 void memcg_put_cache_ids(void);
1157 
1158 /*
1159  * Helper macro to loop through all memcg-specific caches. Callers must still
1160  * check if the cache is valid (it is either valid or NULL).
1161  * the slab_mutex must be held when looping through those caches
1162  */
1163 #define for_each_memcg_cache_index(_idx)	\
1164 	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
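/*
 * Example walk over a root cache's per-memcg children (a sketch; assumes
 * slab_mutex is held and that the cache_from_memcg_idx() helper from
 * mm/slab.h is visible to the caller):
 *
 *	int i;
 *
 *	for_each_memcg_cache_index(i) {
 *		struct kmem_cache *c = cache_from_memcg_idx(root_cache, i);
 *
 *		if (!c)
 *			continue;
 *		... operate on the child cache ...
 *	}
 */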
1165 
1166 static inline bool memcg_kmem_enabled(void)
1167 {
1168 	return static_branch_unlikely(&memcg_kmem_enabled_key);
1169 }
1170 
1171 /*
1172  * Helper for accessing a memcg's index. It is used as an index into the
1173  * child cache array in kmem_cache, and also to derive the cache's name.
1174  * This function returns -1 when this is not a kmem-limited memcg.
1175  */
1176 static inline int memcg_cache_id(struct mem_cgroup *memcg)
1177 {
1178 	return memcg ? memcg->kmemcg_id : -1;
1179 }
1180 
1181 #else
1182 #define for_each_memcg_cache_index(_idx)	\
1183 	for (; NULL; )
1184 
1185 static inline bool memcg_kmem_enabled(void)
1186 {
1187 	return false;
1188 }
1189 
1190 static inline int memcg_cache_id(struct mem_cgroup *memcg)
1191 {
1192 	return -1;
1193 }
1194 
1195 static inline void memcg_get_cache_ids(void)
1196 {
1197 }
1198 
1199 static inline void memcg_put_cache_ids(void)
1200 {
1201 }
1202 
1203 #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
1204 
1205 #endif /* _LINUX_MEMCONTROL_H */
1206