1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* memcontrol.h - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <[email protected]>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <[email protected]>
9  */
10 
11 #ifndef _LINUX_MEMCONTROL_H
12 #define _LINUX_MEMCONTROL_H
13 #include <linux/cgroup.h>
14 #include <linux/vm_event_item.h>
15 #include <linux/hardirq.h>
16 #include <linux/jump_label.h>
17 #include <linux/page_counter.h>
18 #include <linux/vmpressure.h>
19 #include <linux/eventfd.h>
20 #include <linux/mm.h>
21 #include <linux/vmstat.h>
22 #include <linux/writeback.h>
23 #include <linux/page-flags.h>
24 
25 struct mem_cgroup;
26 struct obj_cgroup;
27 struct page;
28 struct mm_struct;
29 struct kmem_cache;
30 
31 /* Cgroup-specific page state, on top of universal node page state */
32 enum memcg_stat_item {
33 	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
34 	MEMCG_SOCK,
35 	MEMCG_NR_STAT,
36 };
37 
38 enum memcg_memory_event {
39 	MEMCG_LOW,
40 	MEMCG_HIGH,
41 	MEMCG_MAX,
42 	MEMCG_OOM,
43 	MEMCG_OOM_KILL,
44 	MEMCG_SWAP_HIGH,
45 	MEMCG_SWAP_MAX,
46 	MEMCG_SWAP_FAIL,
47 	MEMCG_NR_MEMORY_EVENTS,
48 };
49 
50 struct mem_cgroup_reclaim_cookie {
51 	pg_data_t *pgdat;
52 	unsigned int generation;
53 };
54 
55 #ifdef CONFIG_MEMCG
56 
57 #define MEM_CGROUP_ID_SHIFT	16
58 #define MEM_CGROUP_ID_MAX	USHRT_MAX
59 
60 struct mem_cgroup_id {
61 	int id;
62 	refcount_t ref;
63 };
64 
65 /*
66  * Per memcg event counter is incremented at every pagein/pageout. With THP,
67  * it will be incremented by the number of pages. This counter is used
68  * to trigger some periodic events. This is straightforward and better
69  * than using jiffies etc. to handle periodic memcg events.
70  */
71 enum mem_cgroup_events_target {
72 	MEM_CGROUP_TARGET_THRESH,
73 	MEM_CGROUP_TARGET_SOFTLIMIT,
74 	MEM_CGROUP_NTARGETS,
75 };
76 
77 struct memcg_vmstats_percpu {
78 	long stat[MEMCG_NR_STAT];
79 	unsigned long events[NR_VM_EVENT_ITEMS];
80 	unsigned long nr_page_events;
81 	unsigned long targets[MEM_CGROUP_NTARGETS];
82 };
83 
84 struct mem_cgroup_reclaim_iter {
85 	struct mem_cgroup *position;
86 	/* scan generation, increased every round-trip */
87 	unsigned int generation;
88 };
89 
90 struct lruvec_stat {
91 	long count[NR_VM_NODE_STAT_ITEMS];
92 };
93 
94 /*
95  * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
96  * which have elements charged to this memcg.
97  */
98 struct memcg_shrinker_map {
99 	struct rcu_head rcu;
100 	unsigned long map[];
101 };
102 
103 /*
104  * Per-node information in the memory controller.
105  */
106 struct mem_cgroup_per_node {
107 	struct lruvec		lruvec;
108 
109 	/* Legacy local VM stats */
110 	struct lruvec_stat __percpu *lruvec_stat_local;
111 
112 	/* Subtree VM stats (batched updates) */
113 	struct lruvec_stat __percpu *lruvec_stat_cpu;
114 	atomic_long_t		lruvec_stat[NR_VM_NODE_STAT_ITEMS];
115 
116 	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
117 
118 	struct mem_cgroup_reclaim_iter	iter;
119 
120 	struct memcg_shrinker_map __rcu	*shrinker_map;
121 
122 	struct rb_node		tree_node;	/* RB tree node */
123 	unsigned long		usage_in_excess;/* Set to the value by which */
124 						/* the soft limit is exceeded*/
125 	bool			on_tree;
126 	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
127 						/* use container_of	   */
128 };
129 
130 struct mem_cgroup_threshold {
131 	struct eventfd_ctx *eventfd;
132 	unsigned long threshold;
133 };
134 
135 /* For threshold */
136 struct mem_cgroup_threshold_ary {
137 	/* An array index points to the threshold just below or equal to usage. */
138 	int current_threshold;
139 	/* Size of entries[] */
140 	unsigned int size;
141 	/* Array of thresholds */
142 	struct mem_cgroup_threshold entries[];
143 };
144 
145 struct mem_cgroup_thresholds {
146 	/* Primary thresholds array */
147 	struct mem_cgroup_threshold_ary *primary;
148 	/*
149 	 * Spare threshold array.
150 	 * This is needed to make mem_cgroup_unregister_event() "never fail".
151 	 * It must be able to store at least primary->size - 1 entries.
152 	 */
153 	struct mem_cgroup_threshold_ary *spare;
154 };
155 
156 enum memcg_kmem_state {
157 	KMEM_NONE,
158 	KMEM_ALLOCATED,
159 	KMEM_ONLINE,
160 };
161 
162 #if defined(CONFIG_SMP)
163 struct memcg_padding {
164 	char x[0];
165 } ____cacheline_internodealigned_in_smp;
166 #define MEMCG_PADDING(name)      struct memcg_padding name;
167 #else
168 #define MEMCG_PADDING(name)
169 #endif
170 
171 /*
172  * Remember four most recent foreign writebacks with dirty pages in this
173  * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
174  * one in a given round, we're likely to catch it later if it keeps
175  * foreign-dirtying, so a fairly low count should be enough.
176  *
177  * See mem_cgroup_track_foreign_dirty_slowpath() for details.
178  */
179 #define MEMCG_CGWB_FRN_CNT	4
180 
181 struct memcg_cgwb_frn {
182 	u64 bdi_id;			/* bdi->id of the foreign inode */
183 	int memcg_id;			/* memcg->css.id of foreign inode */
184 	u64 at;				/* jiffies_64 at the time of dirtying */
185 	struct wb_completion done;	/* tracks in-flight foreign writebacks */
186 };
187 
188 /*
189  * Bucket for arbitrarily byte-sized objects charged to a memory
190  * cgroup. The bucket can be reparented in one piece when the cgroup
191  * is destroyed, without having to round up the individual references
192  * of all live memory objects in the wild.
193  */
194 struct obj_cgroup {
195 	struct percpu_ref refcnt;
196 	struct mem_cgroup *memcg;
197 	atomic_t nr_charged_bytes;
198 	union {
199 		struct list_head list;
200 		struct rcu_head rcu;
201 	};
202 };
203 
204 /*
205  * The memory controller data structure. The memory controller controls both
206  * page cache and RSS per cgroup. We would eventually like to provide
207  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
208  * to help the administrator determine what knobs to tune.
209  */
210 struct mem_cgroup {
211 	struct cgroup_subsys_state css;
212 
213 	/* Private memcg ID. Used to ID objects that outlive the cgroup */
214 	struct mem_cgroup_id id;
215 
216 	/* Accounted resources */
217 	struct page_counter memory;
218 	struct page_counter swap;
219 
220 	/* Legacy consumer-oriented counters */
221 	struct page_counter memsw;
222 	struct page_counter kmem;
223 	struct page_counter tcpmem;
224 
225 	/* Range enforcement for interrupt charges */
226 	struct work_struct high_work;
227 
228 	unsigned long soft_limit;
229 
230 	/* vmpressure notifications */
231 	struct vmpressure vmpressure;
232 
233 	/*
234 	 * Should the accounting and control be hierarchical, per subtree?
235 	 */
236 	bool use_hierarchy;
237 
238 	/*
239 	 * Should the OOM killer kill all tasks in this cgroup if it kills one?
240 	 */
241 	bool oom_group;
242 
243 	/* protected by memcg_oom_lock */
244 	bool		oom_lock;
245 	int		under_oom;
246 
247 	int	swappiness;
248 	/* OOM-Killer disable */
249 	int		oom_kill_disable;
250 
251 	/* memory.events and memory.events.local */
252 	struct cgroup_file events_file;
253 	struct cgroup_file events_local_file;
254 
255 	/* handle for "memory.swap.events" */
256 	struct cgroup_file swap_events_file;
257 
258 	/* protect arrays of thresholds */
259 	struct mutex thresholds_lock;
260 
261 	/* thresholds for memory usage. RCU-protected */
262 	struct mem_cgroup_thresholds thresholds;
263 
264 	/* thresholds for mem+swap usage. RCU-protected */
265 	struct mem_cgroup_thresholds memsw_thresholds;
266 
267 	/* For oom notifier event fd */
268 	struct list_head oom_notify;
269 
270 	/*
271 	 * Should we move charges of a task when a task is moved into this
272 	 * mem_cgroup ? And what type of charges should we move ?
273 	 */
274 	unsigned long move_charge_at_immigrate;
275 	/* taken only while moving_account > 0 */
276 	spinlock_t		move_lock;
277 	unsigned long		move_lock_flags;
278 
279 	MEMCG_PADDING(_pad1_);
280 
281 	/*
282 	 * set > 0 if pages under this cgroup are moving to other cgroup.
283 	 * Set > 0 if pages under this cgroup are being moved to another cgroup.
284 	atomic_t		moving_account;
285 	struct task_struct	*move_lock_task;
286 
287 	/* Legacy local VM stats and events */
288 	struct memcg_vmstats_percpu __percpu *vmstats_local;
289 
290 	/* Subtree VM stats and events (batched updates) */
291 	struct memcg_vmstats_percpu __percpu *vmstats_percpu;
292 
293 	MEMCG_PADDING(_pad2_);
294 
295 	atomic_long_t		vmstats[MEMCG_NR_STAT];
296 	atomic_long_t		vmevents[NR_VM_EVENT_ITEMS];
297 
298 	/* memory.events */
299 	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
300 	atomic_long_t		memory_events_local[MEMCG_NR_MEMORY_EVENTS];
301 
302 	unsigned long		socket_pressure;
303 
304 	/* Legacy tcp memory accounting */
305 	bool			tcpmem_active;
306 	int			tcpmem_pressure;
307 
308 #ifdef CONFIG_MEMCG_KMEM
309         /* Index in the kmem_cache->memcg_params.memcg_caches array */
310 	int kmemcg_id;
311 	enum memcg_kmem_state kmem_state;
312 	struct obj_cgroup __rcu *objcg;
313 	struct list_head objcg_list; /* list of inherited objcgs */
314 #endif
315 
316 #ifdef CONFIG_CGROUP_WRITEBACK
317 	struct list_head cgwb_list;
318 	struct wb_domain cgwb_domain;
319 	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
320 #endif
321 
322 	/* List of events which userspace want to receive */
323 	struct list_head event_list;
324 	spinlock_t event_list_lock;
325 
326 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
327 	struct deferred_split deferred_split_queue;
328 #endif
329 
330 	struct mem_cgroup_per_node *nodeinfo[0];
331 	/* WARNING: nodeinfo must be the last member here */
332 };
333 
334 /*
335  * Size of the first charge trial. "32" comes from vmscan.c's magic value.
336  * TODO: bigger batches may be necessary on very large machines.
337  */
338 #define MEMCG_CHARGE_BATCH 32U
339 
340 extern struct mem_cgroup *root_mem_cgroup;
341 
342 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
343 {
344 	return (memcg == root_mem_cgroup);
345 }
346 
347 static inline bool mem_cgroup_disabled(void)
348 {
349 	return !cgroup_subsys_enabled(memory_cgrp_subsys);
350 }
351 
352 static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
353 						  struct mem_cgroup *memcg,
354 						  bool in_low_reclaim)
355 {
356 	if (mem_cgroup_disabled())
357 		return 0;
358 
359 	/*
360 	 * There is no reclaim protection applied to a targeted reclaim.
361 	 * We special case this here because the
362 	 * mem_cgroup_protected calculation is not robust enough to keep
363 	 * the protection invariant for calculated effective values for
364 	 * parallel reclaimers with different reclaim targets. This is
365 	 * especially a problem for tail memcgs (as they have pages on LRU)
366 	 * which would want to have effective values 0 for targeted reclaim
367 	 * but a different value for external reclaim.
368 	 *
369 	 * Example
370 	 * Let's have global and A's reclaim in parallel:
371 	 *  |
372 	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
373 	 *  |\
374 	 *  | C (low = 1G, usage = 2.5G)
375 	 *  B (low = 1G, usage = 0.5G)
376 	 *
377 	 * For the global reclaim
378 	 * A.elow = A.low
379 	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
380 	 * C.elow = min(C.usage, C.low)
381 	 *
382 	 * With the effective values resetting we have A reclaim
383 	 * A.elow = 0
384 	 * B.elow = B.low
385 	 * C.elow = C.low
386 	 *
387 	 * If the global reclaim races with A's reclaim then
388 	 * B.elow = C.elow = 0 (because children_low_usage > A.elow)
389 	 * is possible, and reclaiming B would violate the protection.
390 	 *
391 	 */
392 	if (root == memcg)
393 		return 0;
394 
395 	if (in_low_reclaim)
396 		return READ_ONCE(memcg->memory.emin);
397 
398 	return max(READ_ONCE(memcg->memory.emin),
399 		   READ_ONCE(memcg->memory.elow));
400 }
401 
402 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
403 				     struct mem_cgroup *memcg);
404 
405 static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
406 {
407 	/*
408 	 * The root memcg doesn't account charges, and doesn't support
409 	 * protection.
410 	 */
411 	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
412 
413 }
414 
415 static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
416 {
417 	if (!mem_cgroup_supports_protection(memcg))
418 		return false;
419 
420 	return READ_ONCE(memcg->memory.elow) >=
421 		page_counter_read(&memcg->memory);
422 }
423 
424 static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
425 {
426 	if (!mem_cgroup_supports_protection(memcg))
427 		return false;
428 
429 	return READ_ONCE(memcg->memory.emin) >=
430 		page_counter_read(&memcg->memory);
431 }
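
/*
 * Illustrative sketch (not in-tree code): a reclaimer walking a subtree
 * could consult the protection helpers above roughly like this; the
 * function name and the skip policy are hypothetical, the real policy
 * lives in mm/vmscan.c:
 *
 *	static void example_scan_memcg(struct mem_cgroup *root,
 *				       struct mem_cgroup *memcg)
 *	{
 *		mem_cgroup_calculate_protection(root, memcg);
 *
 *		if (mem_cgroup_below_min(memcg))
 *			return;		// hard protection, skip entirely
 *		if (mem_cgroup_below_low(memcg))
 *			return;		// soft protection, usually skipped
 *
 *		// ... shrink this memcg's LRU lists ...
 *	}
 */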
432 
433 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
434 
435 void mem_cgroup_uncharge(struct page *page);
436 void mem_cgroup_uncharge_list(struct list_head *page_list);
437 
438 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
439 
440 static struct mem_cgroup_per_node *
441 mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
442 {
443 	return memcg->nodeinfo[nid];
444 }
445 
446 /**
447  * mem_cgroup_lruvec - get the lru list vector for a memcg & node
448  * @memcg: memcg of the wanted lruvec
449  * @pgdat: pglist_data of the wanted node
450  * Returns the lru list vector holding pages for a given @memcg &
451  * pgdat combination. This can be the node lruvec, if the memory
452  * controller is disabled.
453  */
454 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
455 					       struct pglist_data *pgdat)
456 {
457 	struct mem_cgroup_per_node *mz;
458 	struct lruvec *lruvec;
459 
460 	if (mem_cgroup_disabled()) {
461 		lruvec = &pgdat->__lruvec;
462 		goto out;
463 	}
464 
465 	if (!memcg)
466 		memcg = root_mem_cgroup;
467 
468 	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
469 	lruvec = &mz->lruvec;
470 out:
471 	/*
472 	 * Since a node can be onlined after the mem_cgroup was created,
473 	 * we have to be prepared to initialize lruvec->pgdat here;
474 	 * and if offlined then reonlined, we need to reinitialize it.
475 	 */
476 	if (unlikely(lruvec->pgdat != pgdat))
477 		lruvec->pgdat = pgdat;
478 	return lruvec;
479 }
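
/*
 * Example (illustrative; assumes 'memcg' is a valid memcg and 'nid' an
 * online node): the per-node LRU vector is looked up via the node's
 * pglist_data, e.g.
 *
 *	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
 *
 * With the memory controller disabled this falls back to the node lruvec,
 * so callers do not need to special-case a disabled controller.
 */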
480 
481 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
482 
483 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
484 
485 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
486 
487 struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);
488 
489 static inline struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
490 {
491 	return css ? container_of(css, struct mem_cgroup, css) : NULL;
492 }
493 
494 static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
495 {
496 	return percpu_ref_tryget(&objcg->refcnt);
497 }
498 
499 static inline void obj_cgroup_get(struct obj_cgroup *objcg)
500 {
501 	percpu_ref_get(&objcg->refcnt);
502 }
503 
504 static inline void obj_cgroup_put(struct obj_cgroup *objcg)
505 {
506 	percpu_ref_put(&objcg->refcnt);
507 }
508 
509 /*
510  * After initialization, objcg->memcg always points at a valid memcg,
511  * but it can be atomically swapped to the parent memcg.
512  *
513  * The caller must ensure that the returned memcg won't be released:
514  * e.g. acquire the rcu_read_lock or css_set_lock.
515  */
516 static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
517 {
518 	return READ_ONCE(objcg->memcg);
519 }
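
/*
 * Illustrative sketch of the rule above (the helper name is hypothetical):
 * dereference objcg->memcg only under the RCU read lock, and take a css
 * reference if the memcg has to outlive the critical section:
 *
 *	static struct mem_cgroup *example_get_memcg(struct obj_cgroup *objcg)
 *	{
 *		struct mem_cgroup *memcg;
 *
 *		rcu_read_lock();
 *		do {
 *			memcg = obj_cgroup_memcg(objcg);
 *		} while (memcg && !css_tryget(&memcg->css));
 *		rcu_read_unlock();
 *
 *		return memcg;
 *	}
 */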
520 
521 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
522 {
523 	if (memcg)
524 		css_put(&memcg->css);
525 }
526 
527 #define mem_cgroup_from_counter(counter, member)	\
528 	container_of(counter, struct mem_cgroup, member)
529 
530 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
531 				   struct mem_cgroup *,
532 				   struct mem_cgroup_reclaim_cookie *);
533 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
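
/*
 * Typical full-hierarchy walk with mem_cgroup_iter() (sketch; see the
 * kerneldoc in mm/memcontrol.c for the exact reference rules), with 'root'
 * naming the subtree to walk (NULL means the full hierarchy):
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL))
 *		do_something(iter);	// do_something() is a placeholder
 *
 * Breaking out of the loop early requires mem_cgroup_iter_break(root, iter)
 * so that the reference on the current position is dropped.
 */
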
534 int mem_cgroup_scan_tasks(struct mem_cgroup *,
535 			  int (*)(struct task_struct *, void *), void *);
536 
537 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
538 {
539 	if (mem_cgroup_disabled())
540 		return 0;
541 
542 	return memcg->id.id;
543 }
544 struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
545 
546 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
547 {
548 	return mem_cgroup_from_css(seq_css(m));
549 }
550 
551 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
552 {
553 	struct mem_cgroup_per_node *mz;
554 
555 	if (mem_cgroup_disabled())
556 		return NULL;
557 
558 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
559 	return mz->memcg;
560 }
561 
562 /**
563  * parent_mem_cgroup - find the accounting parent of a memcg
564  * @memcg: memcg whose parent to find
565  *
566  * Returns the parent memcg, or NULL if this is the root or the memory
567  * controller is in legacy no-hierarchy mode.
568  */
569 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
570 {
571 	if (!memcg->memory.parent)
572 		return NULL;
573 	return mem_cgroup_from_counter(memcg->memory.parent, memory);
574 }
575 
576 static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
577 			      struct mem_cgroup *root)
578 {
579 	if (root == memcg)
580 		return true;
581 	if (!root->use_hierarchy)
582 		return false;
583 	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
584 }
585 
586 static inline bool mm_match_cgroup(struct mm_struct *mm,
587 				   struct mem_cgroup *memcg)
588 {
589 	struct mem_cgroup *task_memcg;
590 	bool match = false;
591 
592 	rcu_read_lock();
593 	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
594 	if (task_memcg)
595 		match = mem_cgroup_is_descendant(task_memcg, memcg);
596 	rcu_read_unlock();
597 	return match;
598 }
599 
600 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
601 ino_t page_cgroup_ino(struct page *page);
602 
603 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
604 {
605 	if (mem_cgroup_disabled())
606 		return true;
607 	return !!(memcg->css.flags & CSS_ONLINE);
608 }
609 
610 /*
611  * For memory reclaim.
612  */
613 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
614 
615 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
616 		int zid, int nr_pages);
617 
618 static inline
619 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
620 		enum lru_list lru, int zone_idx)
621 {
622 	struct mem_cgroup_per_node *mz;
623 
624 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
625 	return mz->lru_zone_size[zone_idx][lru];
626 }
627 
628 void mem_cgroup_handle_over_high(void);
629 
630 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
631 
632 unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
633 
634 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
635 				struct task_struct *p);
636 
637 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
638 
639 static inline void mem_cgroup_enter_user_fault(void)
640 {
641 	WARN_ON(current->in_user_fault);
642 	current->in_user_fault = 1;
643 }
644 
645 static inline void mem_cgroup_exit_user_fault(void)
646 {
647 	WARN_ON(!current->in_user_fault);
648 	current->in_user_fault = 0;
649 }
650 
651 static inline bool task_in_memcg_oom(struct task_struct *p)
652 {
653 	return p->memcg_in_oom;
654 }
655 
656 bool mem_cgroup_oom_synchronize(bool wait);
657 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
658 					    struct mem_cgroup *oom_domain);
659 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
660 
661 #ifdef CONFIG_MEMCG_SWAP
662 extern bool cgroup_memory_noswap;
663 #endif
664 
665 struct mem_cgroup *lock_page_memcg(struct page *page);
666 void __unlock_page_memcg(struct mem_cgroup *memcg);
667 void unlock_page_memcg(struct page *page);
668 
669 /*
670  * idx can be of type enum memcg_stat_item or node_stat_item.
671  * Keep in sync with memcg_exact_page_state().
672  */
673 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
674 {
675 	long x = atomic_long_read(&memcg->vmstats[idx]);
676 #ifdef CONFIG_SMP
677 	if (x < 0)
678 		x = 0;
679 #endif
680 	return x;
681 }
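
/*
 * Example (illustrative): both memcg-specific and node stat indexes are
 * accepted here, e.g.
 *
 *	unsigned long swap = memcg_page_state(memcg, MEMCG_SWAP);
 *	unsigned long file = memcg_page_state(memcg, NR_FILE_PAGES);
 *
 * The result is a batched subtree total and may lag the per-cpu deltas
 * slightly; memcg_page_state_local() below sums this memcg only.
 */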
682 
683 /*
684  * idx can be of type enum memcg_stat_item or node_stat_item.
685  * Keep in sync with memcg_exact_page_state().
686  */
687 static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
688 						   int idx)
689 {
690 	long x = 0;
691 	int cpu;
692 
693 	for_each_possible_cpu(cpu)
694 		x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
695 #ifdef CONFIG_SMP
696 	if (x < 0)
697 		x = 0;
698 #endif
699 	return x;
700 }
701 
702 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
703 
704 /* idx can be of type enum memcg_stat_item or node_stat_item */
705 static inline void mod_memcg_state(struct mem_cgroup *memcg,
706 				   int idx, int val)
707 {
708 	unsigned long flags;
709 
710 	local_irq_save(flags);
711 	__mod_memcg_state(memcg, idx, val);
712 	local_irq_restore(flags);
713 }
714 
715 /**
716  * mod_memcg_page_state - update page state statistics
717  * @page: the page
718  * @idx: page state item to account
719  * @val: number of pages (positive or negative)
720  *
721  * The @page must be locked or the caller must use lock_page_memcg()
722  * to prevent double accounting when the page is concurrently being
723  * moved to another memcg:
724  *
725  *   lock_page(page) or lock_page_memcg(page)
726  *   if (TestClearPageState(page))
727  *     mod_memcg_page_state(page, state, -1);
728  *   unlock_page(page) or unlock_page_memcg(page)
729  *
730  * Kernel pages are an exception to this, since they'll never move.
731  */
732 static inline void __mod_memcg_page_state(struct page *page,
733 					  int idx, int val)
734 {
735 	if (page->mem_cgroup)
736 		__mod_memcg_state(page->mem_cgroup, idx, val);
737 }
738 
739 static inline void mod_memcg_page_state(struct page *page,
740 					int idx, int val)
741 {
742 	if (page->mem_cgroup)
743 		mod_memcg_state(page->mem_cgroup, idx, val);
744 }
745 
746 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
747 					      enum node_stat_item idx)
748 {
749 	struct mem_cgroup_per_node *pn;
750 	long x;
751 
752 	if (mem_cgroup_disabled())
753 		return node_page_state(lruvec_pgdat(lruvec), idx);
754 
755 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
756 	x = atomic_long_read(&pn->lruvec_stat[idx]);
757 #ifdef CONFIG_SMP
758 	if (x < 0)
759 		x = 0;
760 #endif
761 	return x;
762 }
763 
764 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
765 						    enum node_stat_item idx)
766 {
767 	struct mem_cgroup_per_node *pn;
768 	long x = 0;
769 	int cpu;
770 
771 	if (mem_cgroup_disabled())
772 		return node_page_state(lruvec_pgdat(lruvec), idx);
773 
774 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
775 	for_each_possible_cpu(cpu)
776 		x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
777 #ifdef CONFIG_SMP
778 	if (x < 0)
779 		x = 0;
780 #endif
781 	return x;
782 }
783 
784 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
785 			      int val);
786 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
787 			int val);
788 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
789 
790 void mod_memcg_obj_state(void *p, int idx, int val);
791 
792 static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
793 					 int val)
794 {
795 	unsigned long flags;
796 
797 	local_irq_save(flags);
798 	__mod_lruvec_slab_state(p, idx, val);
799 	local_irq_restore(flags);
800 }
801 
802 static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
803 					  enum node_stat_item idx, int val)
804 {
805 	unsigned long flags;
806 
807 	local_irq_save(flags);
808 	__mod_memcg_lruvec_state(lruvec, idx, val);
809 	local_irq_restore(flags);
810 }
811 
812 static inline void mod_lruvec_state(struct lruvec *lruvec,
813 				    enum node_stat_item idx, int val)
814 {
815 	unsigned long flags;
816 
817 	local_irq_save(flags);
818 	__mod_lruvec_state(lruvec, idx, val);
819 	local_irq_restore(flags);
820 }
821 
822 static inline void __mod_lruvec_page_state(struct page *page,
823 					   enum node_stat_item idx, int val)
824 {
825 	struct page *head = compound_head(page); /* rmap on tail pages */
826 	pg_data_t *pgdat = page_pgdat(page);
827 	struct lruvec *lruvec;
828 
829 	/* Untracked pages have no memcg, no lruvec. Update only the node */
830 	if (!head->mem_cgroup) {
831 		__mod_node_page_state(pgdat, idx, val);
832 		return;
833 	}
834 
835 	lruvec = mem_cgroup_lruvec(head->mem_cgroup, pgdat);
836 	__mod_lruvec_state(lruvec, idx, val);
837 }
838 
839 static inline void mod_lruvec_page_state(struct page *page,
840 					 enum node_stat_item idx, int val)
841 {
842 	unsigned long flags;
843 
844 	local_irq_save(flags);
845 	__mod_lruvec_page_state(page, idx, val);
846 	local_irq_restore(flags);
847 }
848 
849 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
850 						gfp_t gfp_mask,
851 						unsigned long *total_scanned);
852 
853 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
854 			  unsigned long count);
855 
856 static inline void count_memcg_events(struct mem_cgroup *memcg,
857 				      enum vm_event_item idx,
858 				      unsigned long count)
859 {
860 	unsigned long flags;
861 
862 	local_irq_save(flags);
863 	__count_memcg_events(memcg, idx, count);
864 	local_irq_restore(flags);
865 }
866 
867 static inline void count_memcg_page_event(struct page *page,
868 					  enum vm_event_item idx)
869 {
870 	if (page->mem_cgroup)
871 		count_memcg_events(page->mem_cgroup, idx, 1);
872 }
873 
874 static inline void count_memcg_event_mm(struct mm_struct *mm,
875 					enum vm_event_item idx)
876 {
877 	struct mem_cgroup *memcg;
878 
879 	if (mem_cgroup_disabled())
880 		return;
881 
882 	rcu_read_lock();
883 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
884 	if (likely(memcg))
885 		count_memcg_events(memcg, idx, 1);
886 	rcu_read_unlock();
887 }
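
/*
 * Illustrative example: fault handlers account major faults to the
 * faulting mm's memcg with something like
 *
 *	count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
 *
 * (assuming 'vma' is the faulting VMA; the real call sites are in the
 * page cache and swap fault paths).
 */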
888 
889 static inline void memcg_memory_event(struct mem_cgroup *memcg,
890 				      enum memcg_memory_event event)
891 {
892 	atomic_long_inc(&memcg->memory_events_local[event]);
893 	cgroup_file_notify(&memcg->events_local_file);
894 
895 	do {
896 		atomic_long_inc(&memcg->memory_events[event]);
897 		cgroup_file_notify(&memcg->events_file);
898 
899 		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
900 			break;
901 		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
902 			break;
903 	} while ((memcg = parent_mem_cgroup(memcg)) &&
904 		 !mem_cgroup_is_root(memcg));
905 }
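
/*
 * Example (illustrative): the OOM path raises events on the failing memcg,
 * and the helper above propagates them to the ancestors on the default
 * hierarchy (unless memory_localevents is set), e.g.
 *
 *	memcg_memory_event(memcg, MEMCG_OOM);
 *	...
 *	memcg_memory_event(memcg, MEMCG_OOM_KILL);
 *
 * memory.events.local only counts events raised on the cgroup itself.
 */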
906 
907 static inline void memcg_memory_event_mm(struct mm_struct *mm,
908 					 enum memcg_memory_event event)
909 {
910 	struct mem_cgroup *memcg;
911 
912 	if (mem_cgroup_disabled())
913 		return;
914 
915 	rcu_read_lock();
916 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
917 	if (likely(memcg))
918 		memcg_memory_event(memcg, event);
919 	rcu_read_unlock();
920 }
921 
922 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
923 void mem_cgroup_split_huge_fixup(struct page *head);
924 #endif
925 
926 #else /* CONFIG_MEMCG */
927 
928 #define MEM_CGROUP_ID_SHIFT	0
929 #define MEM_CGROUP_ID_MAX	0
930 
931 struct mem_cgroup;
932 
933 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
934 {
935 	return true;
936 }
937 
938 static inline bool mem_cgroup_disabled(void)
939 {
940 	return true;
941 }
942 
943 static inline void memcg_memory_event(struct mem_cgroup *memcg,
944 				      enum memcg_memory_event event)
945 {
946 }
947 
948 static inline void memcg_memory_event_mm(struct mm_struct *mm,
949 					 enum memcg_memory_event event)
950 {
951 }
952 
953 static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
954 						  struct mem_cgroup *memcg,
955 						  bool in_low_reclaim)
956 {
957 	return 0;
958 }
959 
960 static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
961 						   struct mem_cgroup *memcg)
962 {
963 }
964 
965 static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
966 {
967 	return false;
968 }
969 
970 static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
971 {
972 	return false;
973 }
974 
975 static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
976 				    gfp_t gfp_mask)
977 {
978 	return 0;
979 }
980 
981 static inline void mem_cgroup_uncharge(struct page *page)
982 {
983 }
984 
985 static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
986 {
987 }
988 
989 static inline void mem_cgroup_migrate(struct page *old, struct page *new)
990 {
991 }
992 
993 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
994 					       struct pglist_data *pgdat)
995 {
996 	return &pgdat->__lruvec;
997 }
998 
999 static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
1000 						    struct pglist_data *pgdat)
1001 {
1002 	return &pgdat->__lruvec;
1003 }
1004 
1005 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
1006 {
1007 	return NULL;
1008 }
1009 
1010 static inline bool mm_match_cgroup(struct mm_struct *mm,
1011 		struct mem_cgroup *memcg)
1012 {
1013 	return true;
1014 }
1015 
1016 static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1017 {
1018 	return NULL;
1019 }
1020 
1021 static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
1022 {
1023 	return NULL;
1024 }
1025 
1026 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
1027 {
1028 }
1029 
1030 static inline struct mem_cgroup *
1031 mem_cgroup_iter(struct mem_cgroup *root,
1032 		struct mem_cgroup *prev,
1033 		struct mem_cgroup_reclaim_cookie *reclaim)
1034 {
1035 	return NULL;
1036 }
1037 
1038 static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
1039 					 struct mem_cgroup *prev)
1040 {
1041 }
1042 
1043 static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1044 		int (*fn)(struct task_struct *, void *), void *arg)
1045 {
1046 	return 0;
1047 }
1048 
1049 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
1050 {
1051 	return 0;
1052 }
1053 
1054 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
1055 {
1056 	WARN_ON_ONCE(id);
1057 	/* XXX: This should always return root_mem_cgroup */
1058 	return NULL;
1059 }
1060 
1061 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
1062 {
1063 	return NULL;
1064 }
1065 
1066 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
1067 {
1068 	return NULL;
1069 }
1070 
1071 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
1072 {
1073 	return true;
1074 }
1075 
1076 static inline
1077 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
1078 		enum lru_list lru, int zone_idx)
1079 {
1080 	return 0;
1081 }
1082 
1083 static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1084 {
1085 	return 0;
1086 }
1087 
1088 static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1089 {
1090 	return 0;
1091 }
1092 
1093 static inline void
1094 mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1095 {
1096 }
1097 
1098 static inline void
1099 mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1100 {
1101 }
1102 
1103 static inline struct mem_cgroup *lock_page_memcg(struct page *page)
1104 {
1105 	return NULL;
1106 }
1107 
1108 static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
1109 {
1110 }
1111 
1112 static inline void unlock_page_memcg(struct page *page)
1113 {
1114 }
1115 
1116 static inline void mem_cgroup_handle_over_high(void)
1117 {
1118 }
1119 
1120 static inline void mem_cgroup_enter_user_fault(void)
1121 {
1122 }
1123 
1124 static inline void mem_cgroup_exit_user_fault(void)
1125 {
1126 }
1127 
1128 static inline bool task_in_memcg_oom(struct task_struct *p)
1129 {
1130 	return false;
1131 }
1132 
1133 static inline bool mem_cgroup_oom_synchronize(bool wait)
1134 {
1135 	return false;
1136 }
1137 
1138 static inline struct mem_cgroup *mem_cgroup_get_oom_group(
1139 	struct task_struct *victim, struct mem_cgroup *oom_domain)
1140 {
1141 	return NULL;
1142 }
1143 
1144 static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1145 {
1146 }
1147 
1148 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
1149 {
1150 	return 0;
1151 }
1152 
1153 static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
1154 						   int idx)
1155 {
1156 	return 0;
1157 }
1158 
1159 static inline void __mod_memcg_state(struct mem_cgroup *memcg,
1160 				     int idx,
1161 				     int nr)
1162 {
1163 }
1164 
1165 static inline void mod_memcg_state(struct mem_cgroup *memcg,
1166 				   int idx,
1167 				   int nr)
1168 {
1169 }
1170 
1171 static inline void __mod_memcg_page_state(struct page *page,
1172 					  int idx,
1173 					  int nr)
1174 {
1175 }
1176 
1177 static inline void mod_memcg_page_state(struct page *page,
1178 					int idx,
1179 					int nr)
1180 {
1181 }
1182 
1183 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
1184 					      enum node_stat_item idx)
1185 {
1186 	return node_page_state(lruvec_pgdat(lruvec), idx);
1187 }
1188 
1189 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
1190 						    enum node_stat_item idx)
1191 {
1192 	return node_page_state(lruvec_pgdat(lruvec), idx);
1193 }
1194 
1195 static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
1196 					    enum node_stat_item idx, int val)
1197 {
1198 }
1199 
1200 static inline void __mod_lruvec_state(struct lruvec *lruvec,
1201 				      enum node_stat_item idx, int val)
1202 {
1203 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
1204 }
1205 
1206 static inline void mod_lruvec_state(struct lruvec *lruvec,
1207 				    enum node_stat_item idx, int val)
1208 {
1209 	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
1210 }
1211 
1212 static inline void __mod_lruvec_page_state(struct page *page,
1213 					   enum node_stat_item idx, int val)
1214 {
1215 	__mod_node_page_state(page_pgdat(page), idx, val);
1216 }
1217 
1218 static inline void mod_lruvec_page_state(struct page *page,
1219 					 enum node_stat_item idx, int val)
1220 {
1221 	mod_node_page_state(page_pgdat(page), idx, val);
1222 }
1223 
1224 static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
1225 					   int val)
1226 {
1227 	struct page *page = virt_to_head_page(p);
1228 
1229 	__mod_node_page_state(page_pgdat(page), idx, val);
1230 }
1231 
1232 static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
1233 					 int val)
1234 {
1235 	struct page *page = virt_to_head_page(p);
1236 
1237 	mod_node_page_state(page_pgdat(page), idx, val);
1238 }
1239 
1240 static inline void mod_memcg_obj_state(void *p, int idx, int val)
1241 {
1242 }
1243 
1244 static inline
1245 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
1246 					    gfp_t gfp_mask,
1247 					    unsigned long *total_scanned)
1248 {
1249 	return 0;
1250 }
1251 
1252 static inline void mem_cgroup_split_huge_fixup(struct page *head)
1253 {
1254 }
1255 
1256 static inline void count_memcg_events(struct mem_cgroup *memcg,
1257 				      enum vm_event_item idx,
1258 				      unsigned long count)
1259 {
1260 }
1261 
1262 static inline void __count_memcg_events(struct mem_cgroup *memcg,
1263 					enum vm_event_item idx,
1264 					unsigned long count)
1265 {
1266 }
1267 
1268 static inline void count_memcg_page_event(struct page *page,
1269 					  int idx)
1270 {
1271 }
1272 
1273 static inline
1274 void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
1275 {
1276 }
1277 #endif /* CONFIG_MEMCG */
1278 
1279 /* idx can be of type enum memcg_stat_item or node_stat_item */
1280 static inline void __inc_memcg_state(struct mem_cgroup *memcg,
1281 				     int idx)
1282 {
1283 	__mod_memcg_state(memcg, idx, 1);
1284 }
1285 
1286 /* idx can be of type enum memcg_stat_item or node_stat_item */
1287 static inline void __dec_memcg_state(struct mem_cgroup *memcg,
1288 				     int idx)
1289 {
1290 	__mod_memcg_state(memcg, idx, -1);
1291 }
1292 
1293 /* idx can be of type enum memcg_stat_item or node_stat_item */
1294 static inline void __inc_memcg_page_state(struct page *page,
1295 					  int idx)
1296 {
1297 	__mod_memcg_page_state(page, idx, 1);
1298 }
1299 
1300 /* idx can be of type enum memcg_stat_item or node_stat_item */
1301 static inline void __dec_memcg_page_state(struct page *page,
1302 					  int idx)
1303 {
1304 	__mod_memcg_page_state(page, idx, -1);
1305 }
1306 
1307 static inline void __inc_lruvec_state(struct lruvec *lruvec,
1308 				      enum node_stat_item idx)
1309 {
1310 	__mod_lruvec_state(lruvec, idx, 1);
1311 }
1312 
1313 static inline void __dec_lruvec_state(struct lruvec *lruvec,
1314 				      enum node_stat_item idx)
1315 {
1316 	__mod_lruvec_state(lruvec, idx, -1);
1317 }
1318 
1319 static inline void __inc_lruvec_page_state(struct page *page,
1320 					   enum node_stat_item idx)
1321 {
1322 	__mod_lruvec_page_state(page, idx, 1);
1323 }
1324 
1325 static inline void __dec_lruvec_page_state(struct page *page,
1326 					   enum node_stat_item idx)
1327 {
1328 	__mod_lruvec_page_state(page, idx, -1);
1329 }
1330 
1331 static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
1332 {
1333 	__mod_lruvec_slab_state(p, idx, 1);
1334 }
1335 
1336 static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
1337 {
1338 	__mod_lruvec_slab_state(p, idx, -1);
1339 }
1340 
1341 /* idx can be of type enum memcg_stat_item or node_stat_item */
1342 static inline void inc_memcg_state(struct mem_cgroup *memcg,
1343 				   int idx)
1344 {
1345 	mod_memcg_state(memcg, idx, 1);
1346 }
1347 
1348 /* idx can be of type enum memcg_stat_item or node_stat_item */
1349 static inline void dec_memcg_state(struct mem_cgroup *memcg,
1350 				   int idx)
1351 {
1352 	mod_memcg_state(memcg, idx, -1);
1353 }
1354 
1355 /* idx can be of type enum memcg_stat_item or node_stat_item */
1356 static inline void inc_memcg_page_state(struct page *page,
1357 					int idx)
1358 {
1359 	mod_memcg_page_state(page, idx, 1);
1360 }
1361 
1362 /* idx can be of type enum memcg_stat_item or node_stat_item */
1363 static inline void dec_memcg_page_state(struct page *page,
1364 					int idx)
1365 {
1366 	mod_memcg_page_state(page, idx, -1);
1367 }
1368 
1369 static inline void inc_lruvec_state(struct lruvec *lruvec,
1370 				    enum node_stat_item idx)
1371 {
1372 	mod_lruvec_state(lruvec, idx, 1);
1373 }
1374 
1375 static inline void dec_lruvec_state(struct lruvec *lruvec,
1376 				    enum node_stat_item idx)
1377 {
1378 	mod_lruvec_state(lruvec, idx, -1);
1379 }
1380 
1381 static inline void inc_lruvec_page_state(struct page *page,
1382 					 enum node_stat_item idx)
1383 {
1384 	mod_lruvec_page_state(page, idx, 1);
1385 }
1386 
1387 static inline void dec_lruvec_page_state(struct page *page,
1388 					 enum node_stat_item idx)
1389 {
1390 	mod_lruvec_page_state(page, idx, -1);
1391 }
1392 
1393 static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
1394 {
1395 	struct mem_cgroup *memcg;
1396 
1397 	memcg = lruvec_memcg(lruvec);
1398 	if (!memcg)
1399 		return NULL;
1400 	memcg = parent_mem_cgroup(memcg);
1401 	if (!memcg)
1402 		return NULL;
1403 	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
1404 }
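
/*
 * Illustrative use: walking from a leaf lruvec towards the root, e.g. to
 * propagate cost or pressure information up the hierarchy:
 *
 *	for (; lruvec; lruvec = parent_lruvec(lruvec))
 *		account_something(lruvec);	// placeholder
 *
 * A similar walk is used by the LRU cost accounting code.
 */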
1405 
1406 #ifdef CONFIG_CGROUP_WRITEBACK
1407 
1408 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
1409 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
1410 			 unsigned long *pheadroom, unsigned long *pdirty,
1411 			 unsigned long *pwriteback);
1412 
1413 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
1414 					     struct bdi_writeback *wb);
1415 
1416 static inline void mem_cgroup_track_foreign_dirty(struct page *page,
1417 						  struct bdi_writeback *wb)
1418 {
1419 	if (mem_cgroup_disabled())
1420 		return;
1421 
1422 	if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
1423 		mem_cgroup_track_foreign_dirty_slowpath(page, wb);
1424 }
1425 
1426 void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
1427 
1428 #else	/* CONFIG_CGROUP_WRITEBACK */
1429 
1430 static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
1431 {
1432 	return NULL;
1433 }
1434 
1435 static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
1436 				       unsigned long *pfilepages,
1437 				       unsigned long *pheadroom,
1438 				       unsigned long *pdirty,
1439 				       unsigned long *pwriteback)
1440 {
1441 }
1442 
1443 static inline void mem_cgroup_track_foreign_dirty(struct page *page,
1444 						  struct bdi_writeback *wb)
1445 {
1446 }
1447 
1448 static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
1449 {
1450 }
1451 
1452 #endif	/* CONFIG_CGROUP_WRITEBACK */
1453 
1454 struct sock;
1455 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1456 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1457 #ifdef CONFIG_MEMCG
1458 extern struct static_key_false memcg_sockets_enabled_key;
1459 #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
1460 void mem_cgroup_sk_alloc(struct sock *sk);
1461 void mem_cgroup_sk_free(struct sock *sk);
1462 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1463 {
1464 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
1465 		return true;
1466 	do {
1467 		if (time_before(jiffies, memcg->socket_pressure))
1468 			return true;
1469 	} while ((memcg = parent_mem_cgroup(memcg)));
1470 	return false;
1471 }
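
/*
 * Illustrative sketch (the helper name is hypothetical): networking code
 * gates this on mem_cgroup_sockets_enabled and the socket's sk_memcg:
 *
 *	static bool example_sk_under_pressure(const struct sock *sk)
 *	{
 *		return mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *		       mem_cgroup_under_socket_pressure(sk->sk_memcg);
 *	}
 *
 * The real check is folded into the sk_under_memory_pressure() helpers in
 * include/net/sock.h.
 */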
1472 
1473 extern int memcg_expand_shrinker_maps(int new_id);
1474 
1475 extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
1476 				   int nid, int shrinker_id);
1477 #else
1478 #define mem_cgroup_sockets_enabled 0
1479 static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
1480 static inline void mem_cgroup_sk_free(struct sock *sk) { }
1481 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1482 {
1483 	return false;
1484 }
1485 
1486 static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
1487 					  int nid, int shrinker_id)
1488 {
1489 }
1490 #endif
1491 
1492 #ifdef CONFIG_MEMCG_KMEM
1493 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
1494 			unsigned int nr_pages);
1495 void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
1496 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
1497 void __memcg_kmem_uncharge_page(struct page *page, int order);
1498 
1499 struct obj_cgroup *get_obj_cgroup_from_current(void);
1500 
1501 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
1502 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
1503 
1504 extern struct static_key_false memcg_kmem_enabled_key;
1505 
1506 extern int memcg_nr_cache_ids;
1507 void memcg_get_cache_ids(void);
1508 void memcg_put_cache_ids(void);
1509 
1510 /*
1511  * Helper macro to loop through all memcg-specific caches. Callers must still
1512  * check if the cache is valid (it is either valid or NULL).
1513  * The slab_mutex must be held when looping through those caches.
1514  */
1515 #define for_each_memcg_cache_index(_idx)	\
1516 	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
1517 
1518 static inline bool memcg_kmem_enabled(void)
1519 {
1520 	return static_branch_likely(&memcg_kmem_enabled_key);
1521 }
1522 
1523 static inline bool memcg_kmem_bypass(void)
1524 {
1525 	if (in_interrupt())
1526 		return true;
1527 
1528 	/* Allow remote memcg charging in kthread contexts. */
1529 	if ((!current->mm || (current->flags & PF_KTHREAD)) &&
1530 	     !current->active_memcg)
1531 		return true;
1532 	return false;
1533 }
1534 
1535 static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1536 					 int order)
1537 {
1538 	if (memcg_kmem_enabled())
1539 		return __memcg_kmem_charge_page(page, gfp, order);
1540 	return 0;
1541 }
1542 
1543 static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1544 {
1545 	if (memcg_kmem_enabled())
1546 		__memcg_kmem_uncharge_page(page, order);
1547 }
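
/*
 * Illustrative pairing (sketch only): most kernel allocations are accounted
 * by passing __GFP_ACCOUNT to the page allocator, but the explicit helpers
 * can be paired manually, e.g.
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && memcg_kmem_charge_page(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	...
 *	memcg_kmem_uncharge_page(page, order);	// when freeing
 *
 * Whether a call site relies on __GFP_ACCOUNT or on the explicit helpers
 * depends on the caller; this is only a sketch of the API contract.
 */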
1548 
1549 static inline int memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
1550 				    unsigned int nr_pages)
1551 {
1552 	if (memcg_kmem_enabled())
1553 		return __memcg_kmem_charge(memcg, gfp, nr_pages);
1554 	return 0;
1555 }
1556 
1557 static inline void memcg_kmem_uncharge(struct mem_cgroup *memcg,
1558 				       unsigned int nr_pages)
1559 {
1560 	if (memcg_kmem_enabled())
1561 		__memcg_kmem_uncharge(memcg, nr_pages);
1562 }
1563 
1564 /*
1565  * Helper for accessing a memcg's index. It will be used as an index in the
1566  * child cache array in kmem_cache, and also to derive its name. This function
1567  * will return -1 when this is not a kmem-limited memcg.
1568  */
1569 static inline int memcg_cache_id(struct mem_cgroup *memcg)
1570 {
1571 	return memcg ? memcg->kmemcg_id : -1;
1572 }
1573 
1574 struct mem_cgroup *mem_cgroup_from_obj(void *p);
1575 
1576 #else
1577 
1578 static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1579 					 int order)
1580 {
1581 	return 0;
1582 }
1583 
1584 static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1585 {
1586 }
1587 
1588 static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1589 					   int order)
1590 {
1591 	return 0;
1592 }
1593 
1594 static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
1595 {
1596 }
1597 
1598 #define for_each_memcg_cache_index(_idx)	\
1599 	for (; NULL; )
1600 
1601 static inline bool memcg_kmem_enabled(void)
1602 {
1603 	return false;
1604 }
1605 
1606 static inline int memcg_cache_id(struct mem_cgroup *memcg)
1607 {
1608 	return -1;
1609 }
1610 
1611 static inline void memcg_get_cache_ids(void)
1612 {
1613 }
1614 
1615 static inline void memcg_put_cache_ids(void)
1616 {
1617 }
1618 
1619 static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
1620 {
1621 	return NULL;
1622 }
1623 
1624 #endif /* CONFIG_MEMCG_KMEM */
1625 
1626 #endif /* _LINUX_MEMCONTROL_H */
1627