xref: /linux-6.15/include/linux/memcontrol.h (revision 4e108d4f)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* memcontrol.h - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author: Balbir Singh <[email protected]>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <[email protected]>
9  */
10 
11 #ifndef _LINUX_MEMCONTROL_H
12 #define _LINUX_MEMCONTROL_H
13 #include <linux/cgroup.h>
14 #include <linux/vm_event_item.h>
15 #include <linux/hardirq.h>
16 #include <linux/jump_label.h>
17 #include <linux/page_counter.h>
18 #include <linux/vmpressure.h>
19 #include <linux/eventfd.h>
20 #include <linux/mm.h>
21 #include <linux/vmstat.h>
22 #include <linux/writeback.h>
23 #include <linux/page-flags.h>
24 
25 struct mem_cgroup;
26 struct page;
27 struct mm_struct;
28 struct kmem_cache;
29 
30 /* Cgroup-specific page state, on top of universal node page state */
31 enum memcg_stat_item {
32 	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
33 	MEMCG_SOCK,
34 	/* XXX: why are these zone and not node counters? */
35 	MEMCG_KERNEL_STACK_KB,
36 	MEMCG_NR_STAT,
37 };
38 
39 enum memcg_memory_event {
40 	MEMCG_LOW,
41 	MEMCG_HIGH,
42 	MEMCG_MAX,
43 	MEMCG_OOM,
44 	MEMCG_OOM_KILL,
45 	MEMCG_SWAP_HIGH,
46 	MEMCG_SWAP_MAX,
47 	MEMCG_SWAP_FAIL,
48 	MEMCG_NR_MEMORY_EVENTS,
49 };
50 
51 enum mem_cgroup_protection {
52 	MEMCG_PROT_NONE,
53 	MEMCG_PROT_LOW,
54 	MEMCG_PROT_MIN,
55 };
56 
57 struct mem_cgroup_reclaim_cookie {
58 	pg_data_t *pgdat;
59 	unsigned int generation;
60 };
61 
62 #ifdef CONFIG_MEMCG
63 
64 #define MEM_CGROUP_ID_SHIFT	16
65 #define MEM_CGROUP_ID_MAX	USHRT_MAX
66 
67 struct mem_cgroup_id {
68 	int id;
69 	refcount_t ref;
70 };
71 
72 /*
73  * Per memcg event counter is incremented at every pagein/pageout. With THP,
74  * it will be incremented by the number of pages. This counter is used
75  * to trigger some periodic events. This is straightforward and better
76  * than using jiffies etc. to handle periodic memcg events.
77  */
78 enum mem_cgroup_events_target {
79 	MEM_CGROUP_TARGET_THRESH,
80 	MEM_CGROUP_TARGET_SOFTLIMIT,
81 	MEM_CGROUP_NTARGETS,
82 };
83 
84 struct memcg_vmstats_percpu {
85 	long stat[MEMCG_NR_STAT];
86 	unsigned long events[NR_VM_EVENT_ITEMS];
87 	unsigned long nr_page_events;
88 	unsigned long targets[MEM_CGROUP_NTARGETS];
89 };
90 
91 struct mem_cgroup_reclaim_iter {
92 	struct mem_cgroup *position;
93 	/* scan generation, increased every round-trip */
94 	unsigned int generation;
95 };
96 
97 struct lruvec_stat {
98 	long count[NR_VM_NODE_STAT_ITEMS];
99 };
100 
101 /*
102  * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
103  * which have elements charged to this memcg.
104  */
105 struct memcg_shrinker_map {
106 	struct rcu_head rcu;
107 	unsigned long map[];
108 };
109 
110 /*
111  * per-node information in memory controller.
112  */
113 struct mem_cgroup_per_node {
114 	struct lruvec		lruvec;
115 
116 	/* Legacy local VM stats */
117 	struct lruvec_stat __percpu *lruvec_stat_local;
118 
119 	/* Subtree VM stats (batched updates) */
120 	struct lruvec_stat __percpu *lruvec_stat_cpu;
121 	atomic_long_t		lruvec_stat[NR_VM_NODE_STAT_ITEMS];
122 
123 	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
124 
125 	struct mem_cgroup_reclaim_iter	iter;
126 
127 	struct memcg_shrinker_map __rcu	*shrinker_map;
128 
129 	struct rb_node		tree_node;	/* RB tree node */
130 	unsigned long		usage_in_excess;/* Set to the value by which */
131 						/* the soft limit is exceeded */
132 	bool			on_tree;
133 	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
134 						/* use container_of	   */
135 };
136 
137 struct mem_cgroup_threshold {
138 	struct eventfd_ctx *eventfd;
139 	unsigned long threshold;
140 };
141 
142 /* For threshold */
143 struct mem_cgroup_threshold_ary {
144 	/* An array index points to threshold just below or equal to usage. */
145 	int current_threshold;
146 	/* Size of entries[] */
147 	unsigned int size;
148 	/* Array of thresholds */
149 	struct mem_cgroup_threshold entries[];
150 };
151 
152 struct mem_cgroup_thresholds {
153 	/* Primary thresholds array */
154 	struct mem_cgroup_threshold_ary *primary;
155 	/*
156 	 * Spare threshold array.
157 	 * This is needed to make mem_cgroup_unregister_event() "never fail".
158 	 * It must be able to store at least primary->size - 1 entries.
159 	 */
160 	struct mem_cgroup_threshold_ary *spare;
161 };
162 
163 enum memcg_kmem_state {
164 	KMEM_NONE,
165 	KMEM_ALLOCATED,
166 	KMEM_ONLINE,
167 };
168 
169 #if defined(CONFIG_SMP)
170 struct memcg_padding {
171 	char x[0];
172 } ____cacheline_internodealigned_in_smp;
173 #define MEMCG_PADDING(name)      struct memcg_padding name;
174 #else
175 #define MEMCG_PADDING(name)
176 #endif
177 
178 /*
179  * Remember four most recent foreign writebacks with dirty pages in this
180  * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
181  * one in a given round, we're likely to catch it later if it keeps
182  * foreign-dirtying, so a fairly low count should be enough.
183  *
184  * See mem_cgroup_track_foreign_dirty_slowpath() for details.
185  */
186 #define MEMCG_CGWB_FRN_CNT	4
187 
188 struct memcg_cgwb_frn {
189 	u64 bdi_id;			/* bdi->id of the foreign inode */
190 	int memcg_id;			/* memcg->css.id of foreign inode */
191 	u64 at;				/* jiffies_64 at the time of dirtying */
192 	struct wb_completion done;	/* tracks in-flight foreign writebacks */
193 };
194 
195 /*
196  * The memory controller data structure. The memory controller controls both
197  * page cache and RSS per cgroup. We would eventually like to provide
198  * statistics based on Rik van Riel's clock-pro work,
199  * to help the administrator determine what knobs to tune.
200  */
201 struct mem_cgroup {
202 	struct cgroup_subsys_state css;
203 
204 	/* Private memcg ID. Used to ID objects that outlive the cgroup */
205 	struct mem_cgroup_id id;
206 
207 	/* Accounted resources */
208 	struct page_counter memory;
209 	struct page_counter swap;
210 
211 	/* Legacy consumer-oriented counters */
212 	struct page_counter memsw;
213 	struct page_counter kmem;
214 	struct page_counter tcpmem;
215 
216 	/* Range enforcement for interrupt charges */
217 	struct work_struct high_work;
218 
219 	unsigned long soft_limit;
220 
221 	/* vmpressure notifications */
222 	struct vmpressure vmpressure;
223 
224 	/*
225 	 * Should the accounting and control be hierarchical, per subtree?
226 	 */
227 	bool use_hierarchy;
228 
229 	/*
230 	 * Should the OOM killer kill all tasks in this cgroup if it has to kill one?
231 	 */
232 	bool oom_group;
233 
234 	/* protected by memcg_oom_lock */
235 	bool		oom_lock;
236 	int		under_oom;
237 
238 	int	swappiness;
239 	/* OOM-Killer disable */
240 	int		oom_kill_disable;
241 
242 	/* memory.events and memory.events.local */
243 	struct cgroup_file events_file;
244 	struct cgroup_file events_local_file;
245 
246 	/* handle for "memory.swap.events" */
247 	struct cgroup_file swap_events_file;
248 
249 	/* protect arrays of thresholds */
250 	struct mutex thresholds_lock;
251 
252 	/* thresholds for memory usage. RCU-protected */
253 	struct mem_cgroup_thresholds thresholds;
254 
255 	/* thresholds for mem+swap usage. RCU-protected */
256 	struct mem_cgroup_thresholds memsw_thresholds;
257 
258 	/* For oom notifier event fd */
259 	struct list_head oom_notify;
260 
261 	/*
262 	 * Should we move a task's charges when the task is moved into this
263 	 * mem_cgroup? And what type of charges should we move?
264 	 */
265 	unsigned long move_charge_at_immigrate;
266 	/* taken only while moving_account > 0 */
267 	spinlock_t		move_lock;
268 	unsigned long		move_lock_flags;
269 
270 	MEMCG_PADDING(_pad1_);
271 
272 	/*
273 	 * Set > 0 if pages under this cgroup are moving to another cgroup.
274 	 */
275 	atomic_t		moving_account;
276 	struct task_struct	*move_lock_task;
277 
278 	/* Legacy local VM stats and events */
279 	struct memcg_vmstats_percpu __percpu *vmstats_local;
280 
281 	/* Subtree VM stats and events (batched updates) */
282 	struct memcg_vmstats_percpu __percpu *vmstats_percpu;
283 
284 	MEMCG_PADDING(_pad2_);
285 
286 	atomic_long_t		vmstats[MEMCG_NR_STAT];
287 	atomic_long_t		vmevents[NR_VM_EVENT_ITEMS];
288 
289 	/* memory.events */
290 	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
291 	atomic_long_t		memory_events_local[MEMCG_NR_MEMORY_EVENTS];
292 
293 	unsigned long		socket_pressure;
294 
295 	/* Legacy tcp memory accounting */
296 	bool			tcpmem_active;
297 	int			tcpmem_pressure;
298 
299 #ifdef CONFIG_MEMCG_KMEM
300 	/* Index in the kmem_cache->memcg_params.memcg_caches array */
301 	int kmemcg_id;
302 	enum memcg_kmem_state kmem_state;
303 	struct list_head kmem_caches;
304 #endif
305 
306 #ifdef CONFIG_CGROUP_WRITEBACK
307 	struct list_head cgwb_list;
308 	struct wb_domain cgwb_domain;
309 	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
310 #endif
311 
312 	/* List of events which userspace want to receive */
313 	struct list_head event_list;
314 	spinlock_t event_list_lock;
315 
316 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
317 	struct deferred_split deferred_split_queue;
318 #endif
319 
320 	struct mem_cgroup_per_node *nodeinfo[0];
321 	/* WARNING: nodeinfo must be the last member here */
322 };
323 
324 /*
325  * Size of the first charge trial. "32" comes from vmscan.c's magic value.
326  * TODO: bigger numbers may be necessary on large systems.
327  */
328 #define MEMCG_CHARGE_BATCH 32U
329 
330 extern struct mem_cgroup *root_mem_cgroup;
331 
332 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
333 {
334 	return (memcg == root_mem_cgroup);
335 }
336 
337 static inline bool mem_cgroup_disabled(void)
338 {
339 	return !cgroup_subsys_enabled(memory_cgrp_subsys);
340 }
341 
342 static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
343 						  bool in_low_reclaim)
344 {
345 	if (mem_cgroup_disabled())
346 		return 0;
347 
348 	if (in_low_reclaim)
349 		return READ_ONCE(memcg->memory.emin);
350 
351 	return max(READ_ONCE(memcg->memory.emin),
352 		   READ_ONCE(memcg->memory.elow));
353 }
354 
355 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
356 						struct mem_cgroup *memcg);
357 
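/*
 * Example (illustrative sketch, not lifted from the reclaim code): how a
 * per-memcg reclaim loop can consult the protection API above.
 * target_memcg, memcg and in_low_reclaim are placeholders assumed to be
 * supplied by the surrounding loop.
 *
 *	switch (mem_cgroup_protected(target_memcg, memcg)) {
 *	case MEMCG_PROT_MIN:
 *		continue;		// never reclaim below memory.min
 *	case MEMCG_PROT_LOW:
 *		if (!in_low_reclaim)	// caller's "allow low reclaim" flag
 *			continue;
 *		break;
 *	case MEMCG_PROT_NONE:
 *		break;
 *	}
 */
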
358 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
359 
360 void mem_cgroup_uncharge(struct page *page);
361 void mem_cgroup_uncharge_list(struct list_head *page_list);
362 
363 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
364 
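/*
 * Example (illustrative sketch): charge a new page against the faulting
 * mm before publishing it, and give the charge back if publishing fails.
 * insert_into_mapping() is a made-up placeholder.
 *
 *	error = mem_cgroup_charge(page, mm, GFP_KERNEL);
 *	if (error)
 *		goto out;
 *	error = insert_into_mapping(mapping, page);
 *	if (error)
 *		mem_cgroup_uncharge(page);
 */
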
365 static inline struct mem_cgroup_per_node *
366 mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
367 {
368 	return memcg->nodeinfo[nid];
369 }
370 
371 /**
372  * mem_cgroup_lruvec - get the lru list vector for a memcg & node
373  * @memcg: memcg of the wanted lruvec
374  * @pgdat: pglist_data of the wanted node
375  *
376  * Returns the lru list vector holding pages for a given @memcg & @pgdat
377  * combination, or the node lruvec if the memory controller is disabled.
378  */
379 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
380 					       struct pglist_data *pgdat)
381 {
382 	struct mem_cgroup_per_node *mz;
383 	struct lruvec *lruvec;
384 
385 	if (mem_cgroup_disabled()) {
386 		lruvec = &pgdat->__lruvec;
387 		goto out;
388 	}
389 
390 	if (!memcg)
391 		memcg = root_mem_cgroup;
392 
393 	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
394 	lruvec = &mz->lruvec;
395 out:
396 	/*
397 	 * Since a node can be onlined after the mem_cgroup was created,
398 	 * we have to be prepared to initialize lruvec->pgdat here;
399 	 * and if offlined then reonlined, we need to reinitialize it.
400 	 */
401 	if (unlikely(lruvec->pgdat != pgdat))
402 		lruvec->pgdat = pgdat;
403 	return lruvec;
404 }
405 
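/*
 * Example (illustrative sketch): look up the lruvec that reclaim would
 * operate on for a given memcg and node, then read one of its counters.
 * memcg and pgdat are assumed to be supplied by the caller.
 *
 *	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
 *	unsigned long nr = lruvec_page_state(lruvec, NR_ACTIVE_ANON);
 */
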
406 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
407 
408 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
409 
410 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
411 
412 struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);
413 
414 static inline struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
415 {
416 	return css ? container_of(css, struct mem_cgroup, css) : NULL;
417 }
418 
419 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
420 {
421 	if (memcg)
422 		css_put(&memcg->css);
423 }
424 
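/*
 * Example (illustrative sketch): pin the memcg behind an mm for the
 * duration of an operation and drop the reference afterwards.
 * do_accounting() is a made-up placeholder.
 *
 *	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(current->mm);
 *
 *	do_accounting(memcg);
 *	mem_cgroup_put(memcg);
 */
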
425 #define mem_cgroup_from_counter(counter, member)	\
426 	container_of(counter, struct mem_cgroup, member)
427 
428 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
429 				   struct mem_cgroup *,
430 				   struct mem_cgroup_reclaim_cookie *);
431 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
432 int mem_cgroup_scan_tasks(struct mem_cgroup *,
433 			  int (*)(struct task_struct *, void *), void *);
434 
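/*
 * Example (illustrative sketch): visit every memcg in the subtree rooted
 * at @root. Breaking out of the walk early must go through
 * mem_cgroup_iter_break() so the reference held on the current position
 * is dropped. should_stop() is a made-up placeholder.
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL); iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */
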
435 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
436 {
437 	if (mem_cgroup_disabled())
438 		return 0;
439 
440 	return memcg->id.id;
441 }
442 struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
443 
444 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
445 {
446 	return mem_cgroup_from_css(seq_css(m));
447 }
448 
449 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
450 {
451 	struct mem_cgroup_per_node *mz;
452 
453 	if (mem_cgroup_disabled())
454 		return NULL;
455 
456 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
457 	return mz->memcg;
458 }
459 
460 /**
461  * parent_mem_cgroup - find the accounting parent of a memcg
462  * @memcg: memcg whose parent to find
463  *
464  * Returns the parent memcg, or NULL if this is the root or the memory
465  * controller is in legacy no-hierarchy mode.
466  */
467 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
468 {
469 	if (!memcg->memory.parent)
470 		return NULL;
471 	return mem_cgroup_from_counter(memcg->memory.parent, memory);
472 }
473 
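/*
 * Example (illustrative sketch): walk from a memcg up to the root of the
 * accounting hierarchy, e.g. to propagate a change to all ancestors.
 * propagate() is a made-up placeholder.
 *
 *	for (; memcg; memcg = parent_mem_cgroup(memcg))
 *		propagate(memcg);
 */
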
474 static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
475 			      struct mem_cgroup *root)
476 {
477 	if (root == memcg)
478 		return true;
479 	if (!root->use_hierarchy)
480 		return false;
481 	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
482 }
483 
484 static inline bool mm_match_cgroup(struct mm_struct *mm,
485 				   struct mem_cgroup *memcg)
486 {
487 	struct mem_cgroup *task_memcg;
488 	bool match = false;
489 
490 	rcu_read_lock();
491 	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
492 	if (task_memcg)
493 		match = mem_cgroup_is_descendant(task_memcg, memcg);
494 	rcu_read_unlock();
495 	return match;
496 }
497 
498 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
499 ino_t page_cgroup_ino(struct page *page);
500 
501 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
502 {
503 	if (mem_cgroup_disabled())
504 		return true;
505 	return !!(memcg->css.flags & CSS_ONLINE);
506 }
507 
508 /*
509  * For memory reclaim.
510  */
511 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
512 
513 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
514 		int zid, int nr_pages);
515 
516 static inline
517 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
518 		enum lru_list lru, int zone_idx)
519 {
520 	struct mem_cgroup_per_node *mz;
521 
522 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
523 	return mz->lru_zone_size[zone_idx][lru];
524 }
525 
526 void mem_cgroup_handle_over_high(void);
527 
528 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
529 
530 unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
531 
532 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
533 				struct task_struct *p);
534 
535 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
536 
537 static inline void mem_cgroup_enter_user_fault(void)
538 {
539 	WARN_ON(current->in_user_fault);
540 	current->in_user_fault = 1;
541 }
542 
543 static inline void mem_cgroup_exit_user_fault(void)
544 {
545 	WARN_ON(!current->in_user_fault);
546 	current->in_user_fault = 0;
547 }
548 
549 static inline bool task_in_memcg_oom(struct task_struct *p)
550 {
551 	return p->memcg_in_oom;
552 }
553 
554 bool mem_cgroup_oom_synchronize(bool wait);
555 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
556 					    struct mem_cgroup *oom_domain);
557 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
558 
559 #ifdef CONFIG_MEMCG_SWAP
560 extern bool cgroup_memory_noswap;
561 #endif
562 
563 struct mem_cgroup *lock_page_memcg(struct page *page);
564 void __unlock_page_memcg(struct mem_cgroup *memcg);
565 void unlock_page_memcg(struct page *page);
566 
567 /*
568  * idx can be of type enum memcg_stat_item or node_stat_item.
569  * Keep in sync with memcg_exact_page_state().
570  */
571 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
572 {
573 	long x = atomic_long_read(&memcg->vmstats[idx]);
574 #ifdef CONFIG_SMP
575 	if (x < 0)
576 		x = 0;
577 #endif
578 	return x;
579 }
580 
581 /*
582  * idx can be of type enum memcg_stat_item or node_stat_item.
583  * Keep in sync with memcg_exact_page_state().
584  */
585 static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
586 						   int idx)
587 {
588 	long x = 0;
589 	int cpu;
590 
591 	for_each_possible_cpu(cpu)
592 		x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
593 #ifdef CONFIG_SMP
594 	if (x < 0)
595 		x = 0;
596 #endif
597 	return x;
598 }
599 
600 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
601 
602 /* idx can be of type enum memcg_stat_item or node_stat_item */
603 static inline void mod_memcg_state(struct mem_cgroup *memcg,
604 				   int idx, int val)
605 {
606 	unsigned long flags;
607 
608 	local_irq_save(flags);
609 	__mod_memcg_state(memcg, idx, val);
610 	local_irq_restore(flags);
611 }
612 
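/*
 * Example (illustrative sketch): account nr_pages of socket buffer
 * memory to a memcg, in the style of the socket charging path.
 *
 *	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
 */
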
613 /**
614  * mod_memcg_page_state - update page state statistics
615  * @page: the page
616  * @idx: page state item to account
617  * @val: number of pages (positive or negative)
618  *
619  * The @page must be locked or the caller must use lock_page_memcg()
620  * to prevent double accounting when the page is concurrently being
621  * moved to another memcg:
622  *
623  *   lock_page(page) or lock_page_memcg(page)
624  *   if (TestClearPageState(page))
625  *     mod_memcg_page_state(page, state, -1);
626  *   unlock_page(page) or unlock_page_memcg(page)
627  *
628  * Kernel pages are an exception to this, since they'll never move.
629  */
630 static inline void __mod_memcg_page_state(struct page *page,
631 					  int idx, int val)
632 {
633 	if (page->mem_cgroup)
634 		__mod_memcg_state(page->mem_cgroup, idx, val);
635 }
636 
637 static inline void mod_memcg_page_state(struct page *page,
638 					int idx, int val)
639 {
640 	if (page->mem_cgroup)
641 		mod_memcg_state(page->mem_cgroup, idx, val);
642 }
643 
644 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
645 					      enum node_stat_item idx)
646 {
647 	struct mem_cgroup_per_node *pn;
648 	long x;
649 
650 	if (mem_cgroup_disabled())
651 		return node_page_state(lruvec_pgdat(lruvec), idx);
652 
653 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
654 	x = atomic_long_read(&pn->lruvec_stat[idx]);
655 #ifdef CONFIG_SMP
656 	if (x < 0)
657 		x = 0;
658 #endif
659 	return x;
660 }
661 
662 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
663 						    enum node_stat_item idx)
664 {
665 	struct mem_cgroup_per_node *pn;
666 	long x = 0;
667 	int cpu;
668 
669 	if (mem_cgroup_disabled())
670 		return node_page_state(lruvec_pgdat(lruvec), idx);
671 
672 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
673 	for_each_possible_cpu(cpu)
674 		x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
675 #ifdef CONFIG_SMP
676 	if (x < 0)
677 		x = 0;
678 #endif
679 	return x;
680 }
681 
682 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
683 			int val);
684 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
685 void mod_memcg_obj_state(void *p, int idx, int val);
686 
687 static inline void mod_lruvec_state(struct lruvec *lruvec,
688 				    enum node_stat_item idx, int val)
689 {
690 	unsigned long flags;
691 
692 	local_irq_save(flags);
693 	__mod_lruvec_state(lruvec, idx, val);
694 	local_irq_restore(flags);
695 }
696 
697 static inline void __mod_lruvec_page_state(struct page *page,
698 					   enum node_stat_item idx, int val)
699 {
700 	struct page *head = compound_head(page); /* rmap on tail pages */
701 	pg_data_t *pgdat = page_pgdat(page);
702 	struct lruvec *lruvec;
703 
704 	/* Untracked pages have no memcg, no lruvec. Update only the node */
705 	if (!head->mem_cgroup) {
706 		__mod_node_page_state(pgdat, idx, val);
707 		return;
708 	}
709 
710 	lruvec = mem_cgroup_lruvec(head->mem_cgroup, pgdat);
711 	__mod_lruvec_state(lruvec, idx, val);
712 }
713 
714 static inline void mod_lruvec_page_state(struct page *page,
715 					 enum node_stat_item idx, int val)
716 {
717 	unsigned long flags;
718 
719 	local_irq_save(flags);
720 	__mod_lruvec_page_state(page, idx, val);
721 	local_irq_restore(flags);
722 }
723 
724 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
725 						gfp_t gfp_mask,
726 						unsigned long *total_scanned);
727 
728 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
729 			  unsigned long count);
730 
731 static inline void count_memcg_events(struct mem_cgroup *memcg,
732 				      enum vm_event_item idx,
733 				      unsigned long count)
734 {
735 	unsigned long flags;
736 
737 	local_irq_save(flags);
738 	__count_memcg_events(memcg, idx, count);
739 	local_irq_restore(flags);
740 }
741 
742 static inline void count_memcg_page_event(struct page *page,
743 					  enum vm_event_item idx)
744 {
745 	if (page->mem_cgroup)
746 		count_memcg_events(page->mem_cgroup, idx, 1);
747 }
748 
749 static inline void count_memcg_event_mm(struct mm_struct *mm,
750 					enum vm_event_item idx)
751 {
752 	struct mem_cgroup *memcg;
753 
754 	if (mem_cgroup_disabled())
755 		return;
756 
757 	rcu_read_lock();
758 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
759 	if (likely(memcg))
760 		count_memcg_events(memcg, idx, 1);
761 	rcu_read_unlock();
762 }
763 
764 static inline void memcg_memory_event(struct mem_cgroup *memcg,
765 				      enum memcg_memory_event event)
766 {
767 	atomic_long_inc(&memcg->memory_events_local[event]);
768 	cgroup_file_notify(&memcg->events_local_file);
769 
770 	do {
771 		atomic_long_inc(&memcg->memory_events[event]);
772 		cgroup_file_notify(&memcg->events_file);
773 
774 		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
775 			break;
776 		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
777 			break;
778 	} while ((memcg = parent_mem_cgroup(memcg)) &&
779 		 !mem_cgroup_is_root(memcg));
780 }
781 
782 static inline void memcg_memory_event_mm(struct mm_struct *mm,
783 					 enum memcg_memory_event event)
784 {
785 	struct mem_cgroup *memcg;
786 
787 	if (mem_cgroup_disabled())
788 		return;
789 
790 	rcu_read_lock();
791 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
792 	if (likely(memcg))
793 		memcg_memory_event(memcg, event);
794 	rcu_read_unlock();
795 }
796 
797 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
798 void mem_cgroup_split_huge_fixup(struct page *head);
799 #endif
800 
801 #else /* CONFIG_MEMCG */
802 
803 #define MEM_CGROUP_ID_SHIFT	0
804 #define MEM_CGROUP_ID_MAX	0
805 
806 struct mem_cgroup;
807 
808 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
809 {
810 	return true;
811 }
812 
813 static inline bool mem_cgroup_disabled(void)
814 {
815 	return true;
816 }
817 
818 static inline void memcg_memory_event(struct mem_cgroup *memcg,
819 				      enum memcg_memory_event event)
820 {
821 }
822 
823 static inline void memcg_memory_event_mm(struct mm_struct *mm,
824 					 enum memcg_memory_event event)
825 {
826 }
827 
828 static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
829 						  bool in_low_reclaim)
830 {
831 	return 0;
832 }
833 
834 static inline enum mem_cgroup_protection mem_cgroup_protected(
835 	struct mem_cgroup *root, struct mem_cgroup *memcg)
836 {
837 	return MEMCG_PROT_NONE;
838 }
839 
840 static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
841 				    gfp_t gfp_mask)
842 {
843 	return 0;
844 }
845 
846 static inline void mem_cgroup_uncharge(struct page *page)
847 {
848 }
849 
850 static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
851 {
852 }
853 
854 static inline void mem_cgroup_migrate(struct page *old, struct page *new)
855 {
856 }
857 
858 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
859 					       struct pglist_data *pgdat)
860 {
861 	return &pgdat->__lruvec;
862 }
863 
864 static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
865 						    struct pglist_data *pgdat)
866 {
867 	return &pgdat->__lruvec;
868 }
869 
870 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
871 {
872 	return NULL;
873 }
874 
875 static inline bool mm_match_cgroup(struct mm_struct *mm,
876 		struct mem_cgroup *memcg)
877 {
878 	return true;
879 }
880 
881 static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
882 {
883 	return NULL;
884 }
885 
886 static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
887 {
888 	return NULL;
889 }
890 
891 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
892 {
893 }
894 
895 static inline struct mem_cgroup *
896 mem_cgroup_iter(struct mem_cgroup *root,
897 		struct mem_cgroup *prev,
898 		struct mem_cgroup_reclaim_cookie *reclaim)
899 {
900 	return NULL;
901 }
902 
903 static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
904 					 struct mem_cgroup *prev)
905 {
906 }
907 
908 static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
909 		int (*fn)(struct task_struct *, void *), void *arg)
910 {
911 	return 0;
912 }
913 
914 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
915 {
916 	return 0;
917 }
918 
919 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
920 {
921 	WARN_ON_ONCE(id);
922 	/* XXX: This should always return root_mem_cgroup */
923 	return NULL;
924 }
925 
926 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
927 {
928 	return NULL;
929 }
930 
931 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
932 {
933 	return NULL;
934 }
935 
936 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
937 {
938 	return true;
939 }
940 
941 static inline
942 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
943 		enum lru_list lru, int zone_idx)
944 {
945 	return 0;
946 }
947 
948 static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
949 {
950 	return 0;
951 }
952 
953 static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
954 {
955 	return 0;
956 }
957 
958 static inline void
959 mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
960 {
961 }
962 
963 static inline void
964 mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
965 {
966 }
967 
968 static inline struct mem_cgroup *lock_page_memcg(struct page *page)
969 {
970 	return NULL;
971 }
972 
973 static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
974 {
975 }
976 
977 static inline void unlock_page_memcg(struct page *page)
978 {
979 }
980 
981 static inline void mem_cgroup_handle_over_high(void)
982 {
983 }
984 
985 static inline void mem_cgroup_enter_user_fault(void)
986 {
987 }
988 
989 static inline void mem_cgroup_exit_user_fault(void)
990 {
991 }
992 
993 static inline bool task_in_memcg_oom(struct task_struct *p)
994 {
995 	return false;
996 }
997 
998 static inline bool mem_cgroup_oom_synchronize(bool wait)
999 {
1000 	return false;
1001 }
1002 
1003 static inline struct mem_cgroup *mem_cgroup_get_oom_group(
1004 	struct task_struct *victim, struct mem_cgroup *oom_domain)
1005 {
1006 	return NULL;
1007 }
1008 
1009 static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1010 {
1011 }
1012 
1013 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
1014 {
1015 	return 0;
1016 }
1017 
1018 static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
1019 						   int idx)
1020 {
1021 	return 0;
1022 }
1023 
1024 static inline void __mod_memcg_state(struct mem_cgroup *memcg,
1025 				     int idx,
1026 				     int nr)
1027 {
1028 }
1029 
1030 static inline void mod_memcg_state(struct mem_cgroup *memcg,
1031 				   int idx,
1032 				   int nr)
1033 {
1034 }
1035 
1036 static inline void __mod_memcg_page_state(struct page *page,
1037 					  int idx,
1038 					  int nr)
1039 {
1040 }
1041 
1042 static inline void mod_memcg_page_state(struct page *page,
1043 					int idx,
1044 					int nr)
1045 {
1046 }
1047 
1048 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
1049 					      enum node_stat_item idx)
1050 {
1051 	return node_page_state(lruvec_pgdat(lruvec), idx);
1052 }
1053 
1054 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
1055 						    enum node_stat_item idx)
1056 {
1057 	return node_page_state(lruvec_pgdat(lruvec), idx);
1058 }
1059 
1060 static inline void __mod_lruvec_state(struct lruvec *lruvec,
1061 				      enum node_stat_item idx, int val)
1062 {
1063 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
1064 }
1065 
1066 static inline void mod_lruvec_state(struct lruvec *lruvec,
1067 				    enum node_stat_item idx, int val)
1068 {
1069 	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
1070 }
1071 
1072 static inline void __mod_lruvec_page_state(struct page *page,
1073 					   enum node_stat_item idx, int val)
1074 {
1075 	__mod_node_page_state(page_pgdat(page), idx, val);
1076 }
1077 
1078 static inline void mod_lruvec_page_state(struct page *page,
1079 					 enum node_stat_item idx, int val)
1080 {
1081 	mod_node_page_state(page_pgdat(page), idx, val);
1082 }
1083 
1084 static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
1085 					   int val)
1086 {
1087 	struct page *page = virt_to_head_page(p);
1088 
1089 	__mod_node_page_state(page_pgdat(page), idx, val);
1090 }
1091 
1092 static inline void mod_memcg_obj_state(void *p, int idx, int val)
1093 {
1094 }
1095 
1096 static inline
1097 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
1098 					    gfp_t gfp_mask,
1099 					    unsigned long *total_scanned)
1100 {
1101 	return 0;
1102 }
1103 
1104 static inline void mem_cgroup_split_huge_fixup(struct page *head)
1105 {
1106 }
1107 
1108 static inline void count_memcg_events(struct mem_cgroup *memcg,
1109 				      enum vm_event_item idx,
1110 				      unsigned long count)
1111 {
1112 }
1113 
1114 static inline void __count_memcg_events(struct mem_cgroup *memcg,
1115 					enum vm_event_item idx,
1116 					unsigned long count)
1117 {
1118 }
1119 
1120 static inline void count_memcg_page_event(struct page *page,
1121 					  int idx)
1122 {
1123 }
1124 
1125 static inline
1126 void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
1127 {
1128 }
1129 #endif /* CONFIG_MEMCG */
1130 
1131 /* idx can be of type enum memcg_stat_item or node_stat_item */
1132 static inline void __inc_memcg_state(struct mem_cgroup *memcg,
1133 				     int idx)
1134 {
1135 	__mod_memcg_state(memcg, idx, 1);
1136 }
1137 
1138 /* idx can be of type enum memcg_stat_item or node_stat_item */
1139 static inline void __dec_memcg_state(struct mem_cgroup *memcg,
1140 				     int idx)
1141 {
1142 	__mod_memcg_state(memcg, idx, -1);
1143 }
1144 
1145 /* idx can be of type enum memcg_stat_item or node_stat_item */
1146 static inline void __inc_memcg_page_state(struct page *page,
1147 					  int idx)
1148 {
1149 	__mod_memcg_page_state(page, idx, 1);
1150 }
1151 
1152 /* idx can be of type enum memcg_stat_item or node_stat_item */
1153 static inline void __dec_memcg_page_state(struct page *page,
1154 					  int idx)
1155 {
1156 	__mod_memcg_page_state(page, idx, -1);
1157 }
1158 
1159 static inline void __inc_lruvec_state(struct lruvec *lruvec,
1160 				      enum node_stat_item idx)
1161 {
1162 	__mod_lruvec_state(lruvec, idx, 1);
1163 }
1164 
1165 static inline void __dec_lruvec_state(struct lruvec *lruvec,
1166 				      enum node_stat_item idx)
1167 {
1168 	__mod_lruvec_state(lruvec, idx, -1);
1169 }
1170 
1171 static inline void __inc_lruvec_page_state(struct page *page,
1172 					   enum node_stat_item idx)
1173 {
1174 	__mod_lruvec_page_state(page, idx, 1);
1175 }
1176 
1177 static inline void __dec_lruvec_page_state(struct page *page,
1178 					   enum node_stat_item idx)
1179 {
1180 	__mod_lruvec_page_state(page, idx, -1);
1181 }
1182 
1183 static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
1184 {
1185 	__mod_lruvec_slab_state(p, idx, 1);
1186 }
1187 
1188 static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
1189 {
1190 	__mod_lruvec_slab_state(p, idx, -1);
1191 }
1192 
1193 /* idx can be of type enum memcg_stat_item or node_stat_item */
1194 static inline void inc_memcg_state(struct mem_cgroup *memcg,
1195 				   int idx)
1196 {
1197 	mod_memcg_state(memcg, idx, 1);
1198 }
1199 
1200 /* idx can be of type enum memcg_stat_item or node_stat_item */
1201 static inline void dec_memcg_state(struct mem_cgroup *memcg,
1202 				   int idx)
1203 {
1204 	mod_memcg_state(memcg, idx, -1);
1205 }
1206 
1207 /* idx can be of type enum memcg_stat_item or node_stat_item */
1208 static inline void inc_memcg_page_state(struct page *page,
1209 					int idx)
1210 {
1211 	mod_memcg_page_state(page, idx, 1);
1212 }
1213 
1214 /* idx can be of type enum memcg_stat_item or node_stat_item */
1215 static inline void dec_memcg_page_state(struct page *page,
1216 					int idx)
1217 {
1218 	mod_memcg_page_state(page, idx, -1);
1219 }
1220 
1221 static inline void inc_lruvec_state(struct lruvec *lruvec,
1222 				    enum node_stat_item idx)
1223 {
1224 	mod_lruvec_state(lruvec, idx, 1);
1225 }
1226 
1227 static inline void dec_lruvec_state(struct lruvec *lruvec,
1228 				    enum node_stat_item idx)
1229 {
1230 	mod_lruvec_state(lruvec, idx, -1);
1231 }
1232 
1233 static inline void inc_lruvec_page_state(struct page *page,
1234 					 enum node_stat_item idx)
1235 {
1236 	mod_lruvec_page_state(page, idx, 1);
1237 }
1238 
1239 static inline void dec_lruvec_page_state(struct page *page,
1240 					 enum node_stat_item idx)
1241 {
1242 	mod_lruvec_page_state(page, idx, -1);
1243 }
1244 
1245 static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
1246 {
1247 	struct mem_cgroup *memcg;
1248 
1249 	memcg = lruvec_memcg(lruvec);
1250 	if (!memcg)
1251 		return NULL;
1252 	memcg = parent_mem_cgroup(memcg);
1253 	if (!memcg)
1254 		return NULL;
1255 	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
1256 }
1257 
1258 #ifdef CONFIG_CGROUP_WRITEBACK
1259 
1260 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
1261 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
1262 			 unsigned long *pheadroom, unsigned long *pdirty,
1263 			 unsigned long *pwriteback);
1264 
1265 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
1266 					     struct bdi_writeback *wb);
1267 
1268 static inline void mem_cgroup_track_foreign_dirty(struct page *page,
1269 						  struct bdi_writeback *wb)
1270 {
1271 	if (mem_cgroup_disabled())
1272 		return;
1273 
1274 	if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
1275 		mem_cgroup_track_foreign_dirty_slowpath(page, wb);
1276 }
1277 
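/*
 * Example (illustrative sketch): when dirtying a page against a
 * writeback context, note a foreign memcg so the flusher can eventually
 * switch the inode to the majority dirtier. wb is assumed to have been
 * obtained by the caller (e.g. via inode_to_wb() under the appropriate
 * locking).
 *
 *	mem_cgroup_track_foreign_dirty(page, wb);
 */
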
1278 void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
1279 
1280 #else	/* CONFIG_CGROUP_WRITEBACK */
1281 
1282 static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
1283 {
1284 	return NULL;
1285 }
1286 
1287 static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
1288 				       unsigned long *pfilepages,
1289 				       unsigned long *pheadroom,
1290 				       unsigned long *pdirty,
1291 				       unsigned long *pwriteback)
1292 {
1293 }
1294 
1295 static inline void mem_cgroup_track_foreign_dirty(struct page *page,
1296 						  struct bdi_writeback *wb)
1297 {
1298 }
1299 
1300 static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
1301 {
1302 }
1303 
1304 #endif	/* CONFIG_CGROUP_WRITEBACK */
1305 
1306 struct sock;
1307 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1308 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1309 #ifdef CONFIG_MEMCG
1310 extern struct static_key_false memcg_sockets_enabled_key;
1311 #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
1312 void mem_cgroup_sk_alloc(struct sock *sk);
1313 void mem_cgroup_sk_free(struct sock *sk);
1314 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1315 {
1316 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
1317 		return true;
1318 	do {
1319 		if (time_before(jiffies, memcg->socket_pressure))
1320 			return true;
1321 	} while ((memcg = parent_mem_cgroup(memcg)));
1322 	return false;
1323 }
1324 
1325 extern int memcg_expand_shrinker_maps(int new_id);
1326 
1327 extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
1328 				   int nid, int shrinker_id);
1329 #else
1330 #define mem_cgroup_sockets_enabled 0
1331 static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
1332 static inline void mem_cgroup_sk_free(struct sock *sk) { }
1333 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1334 {
1335 	return false;
1336 }
1337 
1338 static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
1339 					  int nid, int shrinker_id)
1340 {
1341 }
1342 #endif
1343 
1344 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
1345 void memcg_kmem_put_cache(struct kmem_cache *cachep);
1346 
1347 #ifdef CONFIG_MEMCG_KMEM
1348 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
1349 			unsigned int nr_pages);
1350 void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
1351 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
1352 void __memcg_kmem_uncharge_page(struct page *page, int order);
1353 
1354 extern struct static_key_false memcg_kmem_enabled_key;
1355 extern struct workqueue_struct *memcg_kmem_cache_wq;
1356 
1357 extern int memcg_nr_cache_ids;
1358 void memcg_get_cache_ids(void);
1359 void memcg_put_cache_ids(void);
1360 
1361 /*
1362  * Helper macro to loop through all memcg-specific caches. Callers must still
1363  * check if the cache is valid (it is either valid or NULL).
1364  * The slab_mutex must be held when looping through those caches.
1365  */
1366 #define for_each_memcg_cache_index(_idx)	\
1367 	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
1368 
1369 static inline bool memcg_kmem_enabled(void)
1370 {
1371 	return static_branch_unlikely(&memcg_kmem_enabled_key);
1372 }
1373 
1374 static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1375 					 int order)
1376 {
1377 	if (memcg_kmem_enabled())
1378 		return __memcg_kmem_charge_page(page, gfp, order);
1379 	return 0;
1380 }
1381 
1382 static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1383 {
1384 	if (memcg_kmem_enabled())
1385 		__memcg_kmem_uncharge_page(page, order);
1386 }
1387 
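/*
 * Example (illustrative sketch): charge a freshly allocated page for a
 * __GFP_ACCOUNT allocation and back out if the memcg is at its limit,
 * roughly in the style of the page allocator; the matching uncharge
 * happens on the free path.
 *
 *	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) &&
 *	    memcg_kmem_charge_page(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	...
 *	memcg_kmem_uncharge_page(page, order);
 */
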
1388 static inline int memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
1389 				    unsigned int nr_pages)
1390 {
1391 	if (memcg_kmem_enabled())
1392 		return __memcg_kmem_charge(memcg, gfp, nr_pages);
1393 	return 0;
1394 }
1395 
1396 static inline void memcg_kmem_uncharge(struct mem_cgroup *memcg,
1397 				       unsigned int nr_pages)
1398 {
1399 	if (memcg_kmem_enabled())
1400 		__memcg_kmem_uncharge(memcg, nr_pages);
1401 }
1402 
1403 /*
1404  * Helper for accessing a memcg's index. It is used as an index in the
1405  * child cache array in kmem_cache, and also to derive its name. This function
1406  * will return -1 when this is not a kmem-limited memcg.
1407  */
1408 static inline int memcg_cache_id(struct mem_cgroup *memcg)
1409 {
1410 	return memcg ? memcg->kmemcg_id : -1;
1411 }
1412 
1413 struct mem_cgroup *mem_cgroup_from_obj(void *p);
1414 
1415 #else
1416 
1417 static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1418 					 int order)
1419 {
1420 	return 0;
1421 }
1422 
1423 static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1424 {
1425 }
1426 
1427 static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1428 					   int order)
1429 {
1430 	return 0;
1431 }
1432 
1433 static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
1434 {
1435 }
1436 
1437 #define for_each_memcg_cache_index(_idx)	\
1438 	for (; NULL; )
1439 
1440 static inline bool memcg_kmem_enabled(void)
1441 {
1442 	return false;
1443 }
1444 
1445 static inline int memcg_cache_id(struct mem_cgroup *memcg)
1446 {
1447 	return -1;
1448 }
1449 
1450 static inline void memcg_get_cache_ids(void)
1451 {
1452 }
1453 
1454 static inline void memcg_put_cache_ids(void)
1455 {
1456 }
1457 
1458 static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
1459 {
1460 	return NULL;
1461 }
1462 
1463 #endif /* CONFIG_MEMCG_KMEM */
1464 
1465 #endif /* _LINUX_MEMCONTROL_H */
1466