xref: /linux-6.15/include/linux/memcontrol.h (revision bee19cd8)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* memcontrol.h - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <[email protected]>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <[email protected]>
9  */
10 
11 #ifndef _LINUX_MEMCONTROL_H
12 #define _LINUX_MEMCONTROL_H
13 #include <linux/cgroup.h>
14 #include <linux/vm_event_item.h>
15 #include <linux/hardirq.h>
16 #include <linux/jump_label.h>
17 #include <linux/page_counter.h>
18 #include <linux/vmpressure.h>
19 #include <linux/eventfd.h>
20 #include <linux/mm.h>
21 #include <linux/vmstat.h>
22 #include <linux/writeback.h>
23 #include <linux/page-flags.h>
24 
25 struct mem_cgroup;
26 struct page;
27 struct mm_struct;
28 struct kmem_cache;
29 
30 /* Cgroup-specific page state, on top of universal node page state */
31 enum memcg_stat_item {
32 	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
33 	MEMCG_RSS,
34 	MEMCG_RSS_HUGE,
35 	MEMCG_SWAP,
36 	MEMCG_SOCK,
37 	/* XXX: why are these zone and not node counters? */
38 	MEMCG_KERNEL_STACK_KB,
39 	MEMCG_NR_STAT,
40 };
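
/*
 * Note: the memcg_stat_item values above start at NR_VM_NODE_STAT_ITEMS so
 * that enum node_stat_item and enum memcg_stat_item share a single index
 * space. Helpers such as memcg_page_state() below accept either kind of
 * index; a caller might do, for example:
 *
 *	nr_swap = memcg_page_state(memcg, MEMCG_SWAP);
 *	nr_file = memcg_page_state(memcg, NR_FILE_PAGES);
 */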
41 
42 enum memcg_memory_event {
43 	MEMCG_LOW,
44 	MEMCG_HIGH,
45 	MEMCG_MAX,
46 	MEMCG_OOM,
47 	MEMCG_OOM_KILL,
48 	MEMCG_SWAP_MAX,
49 	MEMCG_SWAP_FAIL,
50 	MEMCG_NR_MEMORY_EVENTS,
51 };
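
/*
 * These counters are reported through the cgroup v2 "memory.events" and
 * "memory.swap.events" control files; see the memory_events[] array and
 * memcg_memory_event() further down.
 */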
52 
53 enum mem_cgroup_protection {
54 	MEMCG_PROT_NONE,
55 	MEMCG_PROT_LOW,
56 	MEMCG_PROT_MIN,
57 };
58 
59 struct mem_cgroup_reclaim_cookie {
60 	pg_data_t *pgdat;
61 	int priority;
62 	unsigned int generation;
63 };
64 
65 #ifdef CONFIG_MEMCG
66 
67 #define MEM_CGROUP_ID_SHIFT	16
68 #define MEM_CGROUP_ID_MAX	USHRT_MAX
69 
70 struct mem_cgroup_id {
71 	int id;
72 	refcount_t ref;
73 };
74 
75 /*
76  * The per-memcg event counter is incremented at every pagein/pageout. With
77  * THP, it is incremented by the number of pages. This counter is used to
78  * trigger periodic memcg events; it is simpler and cheaper than using
79  * jiffies etc. to schedule that periodic work.
80  */
81 enum mem_cgroup_events_target {
82 	MEM_CGROUP_TARGET_THRESH,
83 	MEM_CGROUP_TARGET_SOFTLIMIT,
84 	MEM_CGROUP_TARGET_NUMAINFO,
85 	MEM_CGROUP_NTARGETS,
86 };
87 
88 struct memcg_vmstats_percpu {
89 	long stat[MEMCG_NR_STAT];
90 	unsigned long events[NR_VM_EVENT_ITEMS];
91 	unsigned long nr_page_events;
92 	unsigned long targets[MEM_CGROUP_NTARGETS];
93 };
94 
95 struct mem_cgroup_reclaim_iter {
96 	struct mem_cgroup *position;
97 	/* scan generation, increased every round-trip */
98 	unsigned int generation;
99 };
100 
101 struct lruvec_stat {
102 	long count[NR_VM_NODE_STAT_ITEMS];
103 };
104 
105 /*
106  * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
107  * which have elements charged to this memcg.
108  */
109 struct memcg_shrinker_map {
110 	struct rcu_head rcu;
111 	unsigned long map[0];
112 };
113 
114 /*
115  * Per-node information in the memory controller.
116  */
117 struct mem_cgroup_per_node {
118 	struct lruvec		lruvec;
119 
120 	/* Legacy local VM stats */
121 	struct lruvec_stat __percpu *lruvec_stat_local;
122 
123 	/* Subtree VM stats (batched updates) */
124 	struct lruvec_stat __percpu *lruvec_stat_cpu;
125 	atomic_long_t		lruvec_stat[NR_VM_NODE_STAT_ITEMS];
126 
127 	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
128 
129 	struct mem_cgroup_reclaim_iter	iter[DEF_PRIORITY + 1];
130 
131 #ifdef CONFIG_MEMCG_KMEM
132 	struct memcg_shrinker_map __rcu	*shrinker_map;
133 #endif
134 	struct rb_node		tree_node;	/* RB tree node */
135 	unsigned long		usage_in_excess;/* Set to the value by which */
136 						/* the soft limit is exceeded */
137 	bool			on_tree;
138 	bool			congested;	/* memcg has many dirty pages */
139 						/* backed by a congested BDI */
140 
141 	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
142 						/* use container_of	   */
143 };
144 
145 struct mem_cgroup_threshold {
146 	struct eventfd_ctx *eventfd;
147 	unsigned long threshold;
148 };
149 
150 /* For threshold */
151 struct mem_cgroup_threshold_ary {
152 	/* Index of the threshold just below or equal to current usage */
153 	int current_threshold;
154 	/* Size of entries[] */
155 	unsigned int size;
156 	/* Array of thresholds */
157 	struct mem_cgroup_threshold entries[0];
158 };
159 
160 struct mem_cgroup_thresholds {
161 	/* Primary thresholds array */
162 	struct mem_cgroup_threshold_ary *primary;
163 	/*
164 	 * Spare threshold array.
165 	 * This is needed to make mem_cgroup_unregister_event() "never fail".
166 	 * It must be able to store at least primary->size - 1 entries.
167 	 */
168 	struct mem_cgroup_threshold_ary *spare;
169 };
170 
171 enum memcg_kmem_state {
172 	KMEM_NONE,
173 	KMEM_ALLOCATED,
174 	KMEM_ONLINE,
175 };
176 
177 #if defined(CONFIG_SMP)
178 struct memcg_padding {
179 	char x[0];
180 } ____cacheline_internodealigned_in_smp;
181 #define MEMCG_PADDING(name)      struct memcg_padding name;
182 #else
183 #define MEMCG_PADDING(name)
184 #endif
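
/*
 * MEMCG_PADDING() inserts an internode-cacheline-aligned spacer on SMP so
 * that the frequently written fields following it (the move_lock, the
 * per-cpu stat pointers) do not share cache lines with mostly-read fields,
 * avoiding false sharing. Without CONFIG_SMP it compiles away to nothing.
 */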
185 
186 /*
187  * The memory controller data structure. The memory controller controls both
188  * page cache and RSS per cgroup. We would eventually like to provide
189  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
190  * to help the administrator determine what knobs to tune.
191  */
192 struct mem_cgroup {
193 	struct cgroup_subsys_state css;
194 
195 	/* Private memcg ID. Used to ID objects that outlive the cgroup */
196 	struct mem_cgroup_id id;
197 
198 	/* Accounted resources */
199 	struct page_counter memory;
200 	struct page_counter swap;
201 
202 	/* Legacy consumer-oriented counters */
203 	struct page_counter memsw;
204 	struct page_counter kmem;
205 	struct page_counter tcpmem;
206 
207 	/* Upper bound of normal memory consumption range */
208 	unsigned long high;
209 
210 	/* Range enforcement for interrupt charges */
211 	struct work_struct high_work;
212 
213 	unsigned long soft_limit;
214 
215 	/* vmpressure notifications */
216 	struct vmpressure vmpressure;
217 
218 	/*
219 	 * Should the accounting and control be hierarchical, per subtree?
220 	 */
221 	bool use_hierarchy;
222 
223 	/*
224 	 * Should the OOM killer kill all tasks in this cgroup if it has to kill one?
225 	 */
226 	bool oom_group;
227 
228 	/* protected by memcg_oom_lock */
229 	bool		oom_lock;
230 	int		under_oom;
231 
232 	int	swappiness;
233 	/* OOM-Killer disable */
234 	int		oom_kill_disable;
235 
236 	/* memory.events */
237 	struct cgroup_file events_file;
238 
239 	/* handle for "memory.swap.events" */
240 	struct cgroup_file swap_events_file;
241 
242 	/* protect arrays of thresholds */
243 	struct mutex thresholds_lock;
244 
245 	/* thresholds for memory usage. RCU-protected */
246 	struct mem_cgroup_thresholds thresholds;
247 
248 	/* thresholds for mem+swap usage. RCU-protected */
249 	struct mem_cgroup_thresholds memsw_thresholds;
250 
251 	/* For oom notifier event fd */
252 	struct list_head oom_notify;
253 
254 	/*
255 	 * Should we move charges of a task when the task is moved into this
256 	 * mem_cgroup? And what type of charges should we move?
257 	 */
258 	unsigned long move_charge_at_immigrate;
259 	/* taken only while moving_account > 0 */
260 	spinlock_t		move_lock;
261 	unsigned long		move_lock_flags;
262 
263 	MEMCG_PADDING(_pad1_);
264 
265 	/*
266 	 * Set to > 0 while pages under this cgroup are being moved to another cgroup.
267 	 */
268 	atomic_t		moving_account;
269 	struct task_struct	*move_lock_task;
270 
271 	/* Legacy local VM stats and events */
272 	struct memcg_vmstats_percpu __percpu *vmstats_local;
273 
274 	/* Subtree VM stats and events (batched updates) */
275 	struct memcg_vmstats_percpu __percpu *vmstats_percpu;
276 
277 	MEMCG_PADDING(_pad2_);
278 
279 	atomic_long_t		vmstats[MEMCG_NR_STAT];
280 	atomic_long_t		vmevents[NR_VM_EVENT_ITEMS];
281 
282 	/* memory.events */
283 	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
284 
285 	unsigned long		socket_pressure;
286 
287 	/* Legacy tcp memory accounting */
288 	bool			tcpmem_active;
289 	int			tcpmem_pressure;
290 
291 #ifdef CONFIG_MEMCG_KMEM
292         /* Index in the kmem_cache->memcg_params.memcg_caches array */
293 	int kmemcg_id;
294 	enum memcg_kmem_state kmem_state;
295 	struct list_head kmem_caches;
296 #endif
297 
298 	int last_scanned_node;
299 #if MAX_NUMNODES > 1
300 	nodemask_t	scan_nodes;
301 	atomic_t	numainfo_events;
302 	atomic_t	numainfo_updating;
303 #endif
304 
305 #ifdef CONFIG_CGROUP_WRITEBACK
306 	struct list_head cgwb_list;
307 	struct wb_domain cgwb_domain;
308 #endif
309 
310 	/* List of events which userspace wants to receive */
311 	struct list_head event_list;
312 	spinlock_t event_list_lock;
313 
314 	struct mem_cgroup_per_node *nodeinfo[0];
315 	/* WARNING: nodeinfo must be the last member here */
316 };
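
/*
 * nodeinfo[] is a zero-length (trailing) array: struct mem_cgroup is
 * allocated with room for one mem_cgroup_per_node pointer per possible
 * node, roughly like the sketch below (not the exact allocation site):
 *
 *	size = sizeof(struct mem_cgroup) +
 *	       nr_node_ids * sizeof(struct mem_cgroup_per_node *);
 *	memcg = kzalloc(size, GFP_KERNEL);
 *
 * which is why the member above must stay last.
 */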
317 
318 /*
319  * Size of the first charge trial. "32" comes from vmscan.c's magic value.
320  * TODO: larger batches may be necessary on very large machines.
321  */
322 #define MEMCG_CHARGE_BATCH 32U
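
/*
 * Besides sizing the first charge attempt, MEMCG_CHARGE_BATCH is used in
 * mm/memcontrol.c as the threshold at which per-cpu statistics deltas are
 * folded into the shared atomic counters, keeping individual charges and
 * stat updates cheap on the fast path.
 */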
323 
324 extern struct mem_cgroup *root_mem_cgroup;
325 
326 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
327 {
328 	return (memcg == root_mem_cgroup);
329 }
330 
331 static inline bool mem_cgroup_disabled(void)
332 {
333 	return !cgroup_subsys_enabled(memory_cgrp_subsys);
334 }
335 
336 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
337 						struct mem_cgroup *memcg);
338 
339 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
340 			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
341 			  bool compound);
342 int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
343 			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
344 			  bool compound);
345 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
346 			      bool lrucare, bool compound);
347 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
348 		bool compound);
349 void mem_cgroup_uncharge(struct page *page);
350 void mem_cgroup_uncharge_list(struct list_head *page_list);
351 
352 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
353 
354 static struct mem_cgroup_per_node *
355 mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
356 {
357 	return memcg->nodeinfo[nid];
358 }
359 
360 /**
361  * mem_cgroup_lruvec - get the lru list vector for a pgdat and a memcg
362  * @pgdat: pglist_data of the wanted lruvec
363  * @memcg: memcg of the wanted lruvec
364  *
365  * Returns the lru list vector holding pages for the given @pgdat and
366  * @memcg combination. This can be the node lruvec, if the memory
367  * controller is disabled.
368  */
369 static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
370 				struct mem_cgroup *memcg)
371 {
372 	struct mem_cgroup_per_node *mz;
373 	struct lruvec *lruvec;
374 
375 	if (mem_cgroup_disabled()) {
376 		lruvec = node_lruvec(pgdat);
377 		goto out;
378 	}
379 
380 	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
381 	lruvec = &mz->lruvec;
382 out:
383 	/*
384 	 * Since a node can be onlined after the mem_cgroup was created,
385 	 * we have to be prepared to initialize lruvec->pgdat here;
386 	 * and if offlined then reonlined, we need to reinitialize it.
387 	 */
388 	if (unlikely(lruvec->pgdat != pgdat))
389 		lruvec->pgdat = pgdat;
390 	return lruvec;
391 }
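
/*
 * A minimal usage sketch, assuming the caller already holds a reference on
 * @memcg and has the pgdat in hand (as reclaim does):
 *
 *	struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
 *	unsigned long nr = lruvec_page_state(lruvec, NR_ACTIVE_ANON);
 *
 * With the memory controller disabled, the same call transparently returns
 * the node-wide lruvec.
 */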
392 
393 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
394 
395 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
396 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
397 
398 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
399 
400 struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);
401 
402 static inline
403 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
404 	return css ? container_of(css, struct mem_cgroup, css) : NULL;
405 }
406 
407 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
408 {
409 	if (memcg)
410 		css_put(&memcg->css);
411 }
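
/*
 * get_mem_cgroup_from_mm() and get_mem_cgroup_from_page() return a memcg
 * with its css reference elevated; mem_cgroup_put() drops that reference.
 * An illustrative pairing:
 *
 *	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
 *
 *	if (memcg) {
 *		... inspect or charge against memcg ...
 *		mem_cgroup_put(memcg);
 *	}
 */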
412 
413 #define mem_cgroup_from_counter(counter, member)	\
414 	container_of(counter, struct mem_cgroup, member)
415 
416 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
417 				   struct mem_cgroup *,
418 				   struct mem_cgroup_reclaim_cookie *);
419 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
420 int mem_cgroup_scan_tasks(struct mem_cgroup *,
421 			  int (*)(struct task_struct *, void *), void *);
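
/*
 * mem_cgroup_iter() walks the hierarchy under @root in pre-order. Passing
 * the previously returned memcg back in continues the walk; a caller that
 * bails out early must call mem_cgroup_iter_break() to drop the reference
 * held on the current position. mm/memcontrol.c wraps this in a
 * for_each_mem_cgroup_tree() helper; open-coded, the pattern looks like:
 *
 *	struct mem_cgroup *iter = mem_cgroup_iter(root, NULL, NULL);
 *
 *	while (iter) {
 *		if (should_stop(iter)) {	// should_stop() is a placeholder
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *		iter = mem_cgroup_iter(root, iter, NULL);
 *	}
 */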
422 
423 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
424 {
425 	if (mem_cgroup_disabled())
426 		return 0;
427 
428 	return memcg->id.id;
429 }
430 struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
431 
432 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
433 {
434 	return mem_cgroup_from_css(seq_css(m));
435 }
436 
437 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
438 {
439 	struct mem_cgroup_per_node *mz;
440 
441 	if (mem_cgroup_disabled())
442 		return NULL;
443 
444 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
445 	return mz->memcg;
446 }
447 
448 /**
449  * parent_mem_cgroup - find the accounting parent of a memcg
450  * @memcg: memcg whose parent to find
451  *
452  * Returns the parent memcg, or NULL if this is the root or the memory
453  * controller is in legacy no-hierarchy mode.
454  */
455 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
456 {
457 	if (!memcg->memory.parent)
458 		return NULL;
459 	return mem_cgroup_from_counter(memcg->memory.parent, memory);
460 }
461 
462 static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
463 			      struct mem_cgroup *root)
464 {
465 	if (root == memcg)
466 		return true;
467 	if (!root->use_hierarchy)
468 		return false;
469 	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
470 }
471 
472 static inline bool mm_match_cgroup(struct mm_struct *mm,
473 				   struct mem_cgroup *memcg)
474 {
475 	struct mem_cgroup *task_memcg;
476 	bool match = false;
477 
478 	rcu_read_lock();
479 	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
480 	if (task_memcg)
481 		match = mem_cgroup_is_descendant(task_memcg, memcg);
482 	rcu_read_unlock();
483 	return match;
484 }
485 
486 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
487 ino_t page_cgroup_ino(struct page *page);
488 
489 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
490 {
491 	if (mem_cgroup_disabled())
492 		return true;
493 	return !!(memcg->css.flags & CSS_ONLINE);
494 }
495 
496 /*
497  * For memory reclaim.
498  */
499 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
500 
501 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
502 		int zid, int nr_pages);
503 
504 static inline
505 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
506 		enum lru_list lru, int zone_idx)
507 {
508 	struct mem_cgroup_per_node *mz;
509 
510 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
511 	return mz->lru_zone_size[zone_idx][lru];
512 }
513 
514 void mem_cgroup_handle_over_high(void);
515 
516 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
517 
518 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
519 				struct task_struct *p);
520 
521 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
522 
523 static inline void mem_cgroup_enter_user_fault(void)
524 {
525 	WARN_ON(current->in_user_fault);
526 	current->in_user_fault = 1;
527 }
528 
529 static inline void mem_cgroup_exit_user_fault(void)
530 {
531 	WARN_ON(!current->in_user_fault);
532 	current->in_user_fault = 0;
533 }
534 
535 static inline bool task_in_memcg_oom(struct task_struct *p)
536 {
537 	return p->memcg_in_oom;
538 }
539 
540 bool mem_cgroup_oom_synchronize(bool wait);
541 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
542 					    struct mem_cgroup *oom_domain);
543 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
544 
545 #ifdef CONFIG_MEMCG_SWAP
546 extern int do_swap_account;
547 #endif
548 
549 struct mem_cgroup *lock_page_memcg(struct page *page);
550 void __unlock_page_memcg(struct mem_cgroup *memcg);
551 void unlock_page_memcg(struct page *page);
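
/*
 * lock_page_memcg() pins page->mem_cgroup against concurrent charge moving
 * and returns the memcg the page is charged to; the return value may be
 * handed to __unlock_page_memcg() when the page pointer is no longer
 * convenient. A sketch of the usual pairing:
 *
 *	struct mem_cgroup *memcg = lock_page_memcg(page);
 *	... update state accounted to memcg ...
 *	unlock_page_memcg(page);
 */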
552 
553 /*
554  * idx can be of type enum memcg_stat_item or node_stat_item.
555  * Keep in sync with memcg_exact_page_state().
556  */
557 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
558 {
559 	long x = atomic_long_read(&memcg->vmstats[idx]);
560 #ifdef CONFIG_SMP
561 	if (x < 0)
562 		x = 0;
563 #endif
564 	return x;
565 }
566 
567 /*
568  * idx can be of type enum memcg_stat_item or node_stat_item.
569  * Keep in sync with memcg_exact_page_state().
570  */
571 static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
572 						   int idx)
573 {
574 	long x = 0;
575 	int cpu;
576 
577 	for_each_possible_cpu(cpu)
578 		x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
579 #ifdef CONFIG_SMP
580 	if (x < 0)
581 		x = 0;
582 #endif
583 	return x;
584 }
585 
586 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
587 
588 /* idx can be of type enum memcg_stat_item or node_stat_item */
589 static inline void mod_memcg_state(struct mem_cgroup *memcg,
590 				   int idx, int val)
591 {
592 	unsigned long flags;
593 
594 	local_irq_save(flags);
595 	__mod_memcg_state(memcg, idx, val);
596 	local_irq_restore(flags);
597 }
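
/*
 * As elsewhere in this file, the double-underscore variant assumes the
 * caller has already disabled interrupts (or otherwise cannot race with an
 * interrupt-context update), while the plain wrapper brackets the update
 * with local_irq_save()/local_irq_restore(). For example:
 *
 *	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);	// any context
 *	__mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);	// irqs already off
 */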
598 
599 /**
600  * mod_memcg_page_state - update page state statistics
601  * @page: the page
602  * @idx: page state item to account
603  * @val: number of pages (positive or negative)
604  *
605  * The @page must be locked or the caller must use lock_page_memcg()
606  * to prevent double accounting when the page is concurrently being
607  * moved to another memcg:
608  *
609  *   lock_page(page) or lock_page_memcg(page)
610  *   if (TestClearPageState(page))
611  *     mod_memcg_page_state(page, state, -1);
612  *   unlock_page(page) or unlock_page_memcg(page)
613  *
614  * Kernel pages are an exception to this, since they'll never move.
615  */
616 static inline void __mod_memcg_page_state(struct page *page,
617 					  int idx, int val)
618 {
619 	if (page->mem_cgroup)
620 		__mod_memcg_state(page->mem_cgroup, idx, val);
621 }
622 
623 static inline void mod_memcg_page_state(struct page *page,
624 					int idx, int val)
625 {
626 	if (page->mem_cgroup)
627 		mod_memcg_state(page->mem_cgroup, idx, val);
628 }
629 
630 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
631 					      enum node_stat_item idx)
632 {
633 	struct mem_cgroup_per_node *pn;
634 	long x;
635 
636 	if (mem_cgroup_disabled())
637 		return node_page_state(lruvec_pgdat(lruvec), idx);
638 
639 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
640 	x = atomic_long_read(&pn->lruvec_stat[idx]);
641 #ifdef CONFIG_SMP
642 	if (x < 0)
643 		x = 0;
644 #endif
645 	return x;
646 }
647 
648 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
649 						    enum node_stat_item idx)
650 {
651 	struct mem_cgroup_per_node *pn;
652 	long x = 0;
653 	int cpu;
654 
655 	if (mem_cgroup_disabled())
656 		return node_page_state(lruvec_pgdat(lruvec), idx);
657 
658 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
659 	for_each_possible_cpu(cpu)
660 		x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
661 #ifdef CONFIG_SMP
662 	if (x < 0)
663 		x = 0;
664 #endif
665 	return x;
666 }
667 
668 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
669 			int val);
670 
671 static inline void mod_lruvec_state(struct lruvec *lruvec,
672 				    enum node_stat_item idx, int val)
673 {
674 	unsigned long flags;
675 
676 	local_irq_save(flags);
677 	__mod_lruvec_state(lruvec, idx, val);
678 	local_irq_restore(flags);
679 }
680 
681 static inline void __mod_lruvec_page_state(struct page *page,
682 					   enum node_stat_item idx, int val)
683 {
684 	pg_data_t *pgdat = page_pgdat(page);
685 	struct lruvec *lruvec;
686 
687 	/* Untracked pages have no memcg, no lruvec. Update only the node */
688 	if (!page->mem_cgroup) {
689 		__mod_node_page_state(pgdat, idx, val);
690 		return;
691 	}
692 
693 	lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
694 	__mod_lruvec_state(lruvec, idx, val);
695 }
696 
697 static inline void mod_lruvec_page_state(struct page *page,
698 					 enum node_stat_item idx, int val)
699 {
700 	unsigned long flags;
701 
702 	local_irq_save(flags);
703 	__mod_lruvec_page_state(page, idx, val);
704 	local_irq_restore(flags);
705 }
706 
707 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
708 						gfp_t gfp_mask,
709 						unsigned long *total_scanned);
710 
711 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
712 			  unsigned long count);
713 
714 static inline void count_memcg_events(struct mem_cgroup *memcg,
715 				      enum vm_event_item idx,
716 				      unsigned long count)
717 {
718 	unsigned long flags;
719 
720 	local_irq_save(flags);
721 	__count_memcg_events(memcg, idx, count);
722 	local_irq_restore(flags);
723 }
724 
725 static inline void count_memcg_page_event(struct page *page,
726 					  enum vm_event_item idx)
727 {
728 	if (page->mem_cgroup)
729 		count_memcg_events(page->mem_cgroup, idx, 1);
730 }
731 
732 static inline void count_memcg_event_mm(struct mm_struct *mm,
733 					enum vm_event_item idx)
734 {
735 	struct mem_cgroup *memcg;
736 
737 	if (mem_cgroup_disabled())
738 		return;
739 
740 	rcu_read_lock();
741 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
742 	if (likely(memcg))
743 		count_memcg_events(memcg, idx, 1);
744 	rcu_read_unlock();
745 }
746 
747 static inline void memcg_memory_event(struct mem_cgroup *memcg,
748 				      enum memcg_memory_event event)
749 {
750 	do {
751 		atomic_long_inc(&memcg->memory_events[event]);
752 		cgroup_file_notify(&memcg->events_file);
753 
754 		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
755 			break;
756 	} while ((memcg = parent_mem_cgroup(memcg)) &&
757 		 !mem_cgroup_is_root(memcg));
758 }
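
/*
 * memcg_memory_event() records an event against @memcg and, unless the
 * hierarchy was mounted with the memory_localevents option, against all of
 * its ancestors as well, notifying the "memory.events" file at each level.
 * A caller that just failed to keep usage below the hard limit might do:
 *
 *	memcg_memory_event(memcg, MEMCG_MAX);
 */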
759 
760 static inline void memcg_memory_event_mm(struct mm_struct *mm,
761 					 enum memcg_memory_event event)
762 {
763 	struct mem_cgroup *memcg;
764 
765 	if (mem_cgroup_disabled())
766 		return;
767 
768 	rcu_read_lock();
769 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
770 	if (likely(memcg))
771 		memcg_memory_event(memcg, event);
772 	rcu_read_unlock();
773 }
774 
775 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
776 void mem_cgroup_split_huge_fixup(struct page *head);
777 #endif
778 
779 #else /* CONFIG_MEMCG */
780 
781 #define MEM_CGROUP_ID_SHIFT	0
782 #define MEM_CGROUP_ID_MAX	0
783 
784 struct mem_cgroup;
785 
786 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
787 {
788 	return true;
789 }
790 
791 static inline bool mem_cgroup_disabled(void)
792 {
793 	return true;
794 }
795 
796 static inline void memcg_memory_event(struct mem_cgroup *memcg,
797 				      enum memcg_memory_event event)
798 {
799 }
800 
801 static inline void memcg_memory_event_mm(struct mm_struct *mm,
802 					 enum memcg_memory_event event)
803 {
804 }
805 
806 static inline enum mem_cgroup_protection mem_cgroup_protected(
807 	struct mem_cgroup *root, struct mem_cgroup *memcg)
808 {
809 	return MEMCG_PROT_NONE;
810 }
811 
812 static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
813 					gfp_t gfp_mask,
814 					struct mem_cgroup **memcgp,
815 					bool compound)
816 {
817 	*memcgp = NULL;
818 	return 0;
819 }
820 
821 static inline int mem_cgroup_try_charge_delay(struct page *page,
822 					      struct mm_struct *mm,
823 					      gfp_t gfp_mask,
824 					      struct mem_cgroup **memcgp,
825 					      bool compound)
826 {
827 	*memcgp = NULL;
828 	return 0;
829 }
830 
831 static inline void mem_cgroup_commit_charge(struct page *page,
832 					    struct mem_cgroup *memcg,
833 					    bool lrucare, bool compound)
834 {
835 }
836 
837 static inline void mem_cgroup_cancel_charge(struct page *page,
838 					    struct mem_cgroup *memcg,
839 					    bool compound)
840 {
841 }
842 
843 static inline void mem_cgroup_uncharge(struct page *page)
844 {
845 }
846 
847 static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
848 {
849 }
850 
851 static inline void mem_cgroup_migrate(struct page *old, struct page *new)
852 {
853 }
854 
855 static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
856 				struct mem_cgroup *memcg)
857 {
858 	return node_lruvec(pgdat);
859 }
860 
861 static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
862 						    struct pglist_data *pgdat)
863 {
864 	return &pgdat->lruvec;
865 }
866 
867 static inline bool mm_match_cgroup(struct mm_struct *mm,
868 		struct mem_cgroup *memcg)
869 {
870 	return true;
871 }
872 
873 static inline bool task_in_mem_cgroup(struct task_struct *task,
874 				      const struct mem_cgroup *memcg)
875 {
876 	return true;
877 }
878 
879 static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
880 {
881 	return NULL;
882 }
883 
884 static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
885 {
886 	return NULL;
887 }
888 
889 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
890 {
891 }
892 
893 static inline struct mem_cgroup *
894 mem_cgroup_iter(struct mem_cgroup *root,
895 		struct mem_cgroup *prev,
896 		struct mem_cgroup_reclaim_cookie *reclaim)
897 {
898 	return NULL;
899 }
900 
901 static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
902 					 struct mem_cgroup *prev)
903 {
904 }
905 
906 static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
907 		int (*fn)(struct task_struct *, void *), void *arg)
908 {
909 	return 0;
910 }
911 
912 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
913 {
914 	return 0;
915 }
916 
917 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
918 {
919 	WARN_ON_ONCE(id);
920 	/* XXX: This should always return root_mem_cgroup */
921 	return NULL;
922 }
923 
924 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
925 {
926 	return NULL;
927 }
928 
929 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
930 {
931 	return NULL;
932 }
933 
934 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
935 {
936 	return true;
937 }
938 
939 static inline
940 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
941 		enum lru_list lru, int zone_idx)
942 {
943 	return 0;
944 }
945 
946 static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
947 {
948 	return 0;
949 }
950 
951 static inline void
952 mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
953 {
954 }
955 
956 static inline void
957 mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
958 {
959 }
960 
961 static inline struct mem_cgroup *lock_page_memcg(struct page *page)
962 {
963 	return NULL;
964 }
965 
966 static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
967 {
968 }
969 
970 static inline void unlock_page_memcg(struct page *page)
971 {
972 }
973 
974 static inline void mem_cgroup_handle_over_high(void)
975 {
976 }
977 
978 static inline void mem_cgroup_enter_user_fault(void)
979 {
980 }
981 
982 static inline void mem_cgroup_exit_user_fault(void)
983 {
984 }
985 
986 static inline bool task_in_memcg_oom(struct task_struct *p)
987 {
988 	return false;
989 }
990 
991 static inline bool mem_cgroup_oom_synchronize(bool wait)
992 {
993 	return false;
994 }
995 
996 static inline struct mem_cgroup *mem_cgroup_get_oom_group(
997 	struct task_struct *victim, struct mem_cgroup *oom_domain)
998 {
999 	return NULL;
1000 }
1001 
1002 static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1003 {
1004 }
1005 
1006 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
1007 {
1008 	return 0;
1009 }
1010 
1011 static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
1012 						   int idx)
1013 {
1014 	return 0;
1015 }
1016 
1017 static inline void __mod_memcg_state(struct mem_cgroup *memcg,
1018 				     int idx,
1019 				     int nr)
1020 {
1021 }
1022 
1023 static inline void mod_memcg_state(struct mem_cgroup *memcg,
1024 				   int idx,
1025 				   int nr)
1026 {
1027 }
1028 
1029 static inline void __mod_memcg_page_state(struct page *page,
1030 					  int idx,
1031 					  int nr)
1032 {
1033 }
1034 
1035 static inline void mod_memcg_page_state(struct page *page,
1036 					int idx,
1037 					int nr)
1038 {
1039 }
1040 
1041 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
1042 					      enum node_stat_item idx)
1043 {
1044 	return node_page_state(lruvec_pgdat(lruvec), idx);
1045 }
1046 
1047 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
1048 						    enum node_stat_item idx)
1049 {
1050 	return node_page_state(lruvec_pgdat(lruvec), idx);
1051 }
1052 
1053 static inline void __mod_lruvec_state(struct lruvec *lruvec,
1054 				      enum node_stat_item idx, int val)
1055 {
1056 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
1057 }
1058 
1059 static inline void mod_lruvec_state(struct lruvec *lruvec,
1060 				    enum node_stat_item idx, int val)
1061 {
1062 	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
1063 }
1064 
1065 static inline void __mod_lruvec_page_state(struct page *page,
1066 					   enum node_stat_item idx, int val)
1067 {
1068 	__mod_node_page_state(page_pgdat(page), idx, val);
1069 }
1070 
1071 static inline void mod_lruvec_page_state(struct page *page,
1072 					 enum node_stat_item idx, int val)
1073 {
1074 	mod_node_page_state(page_pgdat(page), idx, val);
1075 }
1076 
1077 static inline
1078 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
1079 					    gfp_t gfp_mask,
1080 					    unsigned long *total_scanned)
1081 {
1082 	return 0;
1083 }
1084 
1085 static inline void mem_cgroup_split_huge_fixup(struct page *head)
1086 {
1087 }
1088 
1089 static inline void count_memcg_events(struct mem_cgroup *memcg,
1090 				      enum vm_event_item idx,
1091 				      unsigned long count)
1092 {
1093 }
1094 
1095 static inline void __count_memcg_events(struct mem_cgroup *memcg,
1096 					enum vm_event_item idx,
1097 					unsigned long count)
1098 {
1099 }
1100 
1101 static inline void count_memcg_page_event(struct page *page,
1102 					  int idx)
1103 {
1104 }
1105 
1106 static inline
1107 void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
1108 {
1109 }
1110 #endif /* CONFIG_MEMCG */
1111 
1112 /* idx can be of type enum memcg_stat_item or node_stat_item */
1113 static inline void __inc_memcg_state(struct mem_cgroup *memcg,
1114 				     int idx)
1115 {
1116 	__mod_memcg_state(memcg, idx, 1);
1117 }
1118 
1119 /* idx can be of type enum memcg_stat_item or node_stat_item */
1120 static inline void __dec_memcg_state(struct mem_cgroup *memcg,
1121 				     int idx)
1122 {
1123 	__mod_memcg_state(memcg, idx, -1);
1124 }
1125 
1126 /* idx can be of type enum memcg_stat_item or node_stat_item */
1127 static inline void __inc_memcg_page_state(struct page *page,
1128 					  int idx)
1129 {
1130 	__mod_memcg_page_state(page, idx, 1);
1131 }
1132 
1133 /* idx can be of type enum memcg_stat_item or node_stat_item */
1134 static inline void __dec_memcg_page_state(struct page *page,
1135 					  int idx)
1136 {
1137 	__mod_memcg_page_state(page, idx, -1);
1138 }
1139 
1140 static inline void __inc_lruvec_state(struct lruvec *lruvec,
1141 				      enum node_stat_item idx)
1142 {
1143 	__mod_lruvec_state(lruvec, idx, 1);
1144 }
1145 
1146 static inline void __dec_lruvec_state(struct lruvec *lruvec,
1147 				      enum node_stat_item idx)
1148 {
1149 	__mod_lruvec_state(lruvec, idx, -1);
1150 }
1151 
1152 static inline void __inc_lruvec_page_state(struct page *page,
1153 					   enum node_stat_item idx)
1154 {
1155 	__mod_lruvec_page_state(page, idx, 1);
1156 }
1157 
1158 static inline void __dec_lruvec_page_state(struct page *page,
1159 					   enum node_stat_item idx)
1160 {
1161 	__mod_lruvec_page_state(page, idx, -1);
1162 }
1163 
1164 /* idx can be of type enum memcg_stat_item or node_stat_item */
1165 static inline void inc_memcg_state(struct mem_cgroup *memcg,
1166 				   int idx)
1167 {
1168 	mod_memcg_state(memcg, idx, 1);
1169 }
1170 
1171 /* idx can be of type enum memcg_stat_item or node_stat_item */
1172 static inline void dec_memcg_state(struct mem_cgroup *memcg,
1173 				   int idx)
1174 {
1175 	mod_memcg_state(memcg, idx, -1);
1176 }
1177 
1178 /* idx can be of type enum memcg_stat_item or node_stat_item */
1179 static inline void inc_memcg_page_state(struct page *page,
1180 					int idx)
1181 {
1182 	mod_memcg_page_state(page, idx, 1);
1183 }
1184 
1185 /* idx can be of type enum memcg_stat_item or node_stat_item */
1186 static inline void dec_memcg_page_state(struct page *page,
1187 					int idx)
1188 {
1189 	mod_memcg_page_state(page, idx, -1);
1190 }
1191 
1192 static inline void inc_lruvec_state(struct lruvec *lruvec,
1193 				    enum node_stat_item idx)
1194 {
1195 	mod_lruvec_state(lruvec, idx, 1);
1196 }
1197 
1198 static inline void dec_lruvec_state(struct lruvec *lruvec,
1199 				    enum node_stat_item idx)
1200 {
1201 	mod_lruvec_state(lruvec, idx, -1);
1202 }
1203 
1204 static inline void inc_lruvec_page_state(struct page *page,
1205 					 enum node_stat_item idx)
1206 {
1207 	mod_lruvec_page_state(page, idx, 1);
1208 }
1209 
1210 static inline void dec_lruvec_page_state(struct page *page,
1211 					 enum node_stat_item idx)
1212 {
1213 	mod_lruvec_page_state(page, idx, -1);
1214 }
1215 
1216 #ifdef CONFIG_CGROUP_WRITEBACK
1217 
1218 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
1219 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
1220 			 unsigned long *pheadroom, unsigned long *pdirty,
1221 			 unsigned long *pwriteback);
1222 
1223 #else	/* CONFIG_CGROUP_WRITEBACK */
1224 
1225 static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
1226 {
1227 	return NULL;
1228 }
1229 
1230 static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
1231 				       unsigned long *pfilepages,
1232 				       unsigned long *pheadroom,
1233 				       unsigned long *pdirty,
1234 				       unsigned long *pwriteback)
1235 {
1236 }
1237 
1238 #endif	/* CONFIG_CGROUP_WRITEBACK */
1239 
1240 struct sock;
1241 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1242 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1243 #ifdef CONFIG_MEMCG
1244 extern struct static_key_false memcg_sockets_enabled_key;
1245 #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
1246 void mem_cgroup_sk_alloc(struct sock *sk);
1247 void mem_cgroup_sk_free(struct sock *sk);
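/*
 * On cgroup1, socket memory is accounted to the separate tcpmem counter and
 * pressure is signalled via tcpmem_pressure. On cgroup2, socket memory is
 * charged to the unified memory counter and socket_pressure holds a jiffies
 * timestamp: until it expires, the memcg (and, as checked below, any of its
 * ancestors) is treated as under pressure so the networking layer backs off.
 */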
1248 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1249 {
1250 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
1251 		return true;
1252 	do {
1253 		if (time_before(jiffies, memcg->socket_pressure))
1254 			return true;
1255 	} while ((memcg = parent_mem_cgroup(memcg)));
1256 	return false;
1257 }
1258 #else
1259 #define mem_cgroup_sockets_enabled 0
1260 static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
1261 static inline void mem_cgroup_sk_free(struct sock *sk) { }
1262 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1263 {
1264 	return false;
1265 }
1266 #endif
1267 
1268 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
1269 void memcg_kmem_put_cache(struct kmem_cache *cachep);
1270 
1271 #ifdef CONFIG_MEMCG_KMEM
1272 int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
1273 void __memcg_kmem_uncharge(struct page *page, int order);
1274 int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
1275 			      struct mem_cgroup *memcg);
1276 
1277 extern struct static_key_false memcg_kmem_enabled_key;
1278 extern struct workqueue_struct *memcg_kmem_cache_wq;
1279 
1280 extern int memcg_nr_cache_ids;
1281 void memcg_get_cache_ids(void);
1282 void memcg_put_cache_ids(void);
1283 
1284 /*
1285  * Helper macro to loop through all memcg-specific caches. Callers must still
1286  * check if the cache is valid (it is either valid or NULL).
1287  * The slab_mutex must be held while looping through those caches.
1288  */
1289 #define for_each_memcg_cache_index(_idx)	\
1290 	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
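
/*
 * Illustrative only: walking the per-memcg clones of a root cache, with
 * slab_mutex held as required above. memcg_cache_at() stands in for the
 * accessor provided by the slab internals (mm/slab.h):
 *
 *	for_each_memcg_cache_index(i) {
 *		struct kmem_cache *c = memcg_cache_at(root_cache, i);	// placeholder
 *
 *		if (!c)
 *			continue;
 *		... use c ...
 *	}
 */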
1291 
1292 static inline bool memcg_kmem_enabled(void)
1293 {
1294 	return static_branch_unlikely(&memcg_kmem_enabled_key);
1295 }
1296 
1297 static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
1298 {
1299 	if (memcg_kmem_enabled())
1300 		return __memcg_kmem_charge(page, gfp, order);
1301 	return 0;
1302 }
1303 
1304 static inline void memcg_kmem_uncharge(struct page *page, int order)
1305 {
1306 	if (memcg_kmem_enabled())
1307 		__memcg_kmem_uncharge(page, order);
1308 }
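
/*
 * A rough sketch of how the page allocator uses the pair above for
 * __GFP_ACCOUNT allocations (error handling and the exact call sites are
 * simplified):
 *
 *	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
 *	    memcg_kmem_charge(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	...
 *	memcg_kmem_uncharge(page, order);	// on the free path
 */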
1309 
1310 static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp,
1311 					  int order, struct mem_cgroup *memcg)
1312 {
1313 	if (memcg_kmem_enabled())
1314 		return __memcg_kmem_charge_memcg(page, gfp, order, memcg);
1315 	return 0;
1316 }
1317 /*
1318  * Helper for accessing a memcg's kmem cache index. It is used as an index in
1319  * the child cache array in kmem_cache, and also to derive the cache's name.
1320  * This function returns -1 when this is not a kmem-limited memcg.
1321  */
1322 static inline int memcg_cache_id(struct mem_cgroup *memcg)
1323 {
1324 	return memcg ? memcg->kmemcg_id : -1;
1325 }
1326 
1327 extern int memcg_expand_shrinker_maps(int new_id);
1328 
1329 extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
1330 				   int nid, int shrinker_id);
1331 #else
1332 
1333 static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
1334 {
1335 	return 0;
1336 }
1337 
1338 static inline void memcg_kmem_uncharge(struct page *page, int order)
1339 {
1340 }
1341 
1342 static inline int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
1343 {
1344 	return 0;
1345 }
1346 
1347 static inline void __memcg_kmem_uncharge(struct page *page, int order)
1348 {
1349 }
1350 
1351 #define for_each_memcg_cache_index(_idx)	\
1352 	for (; NULL; )
1353 
1354 static inline bool memcg_kmem_enabled(void)
1355 {
1356 	return false;
1357 }
1358 
1359 static inline int memcg_cache_id(struct mem_cgroup *memcg)
1360 {
1361 	return -1;
1362 }
1363 
1364 static inline void memcg_get_cache_ids(void)
1365 {
1366 }
1367 
1368 static inline void memcg_put_cache_ids(void)
1369 {
1370 }
1371 
1372 static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
1373 					  int nid, int shrinker_id) { }
1374 #endif /* CONFIG_MEMCG_KMEM */
1375 
1376 #endif /* _LINUX_MEMCONTROL_H */
1377