xref: /linux-6.15/include/linux/memcontrol.h (revision cfd6ed45)
1 /* memcontrol.h - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <[email protected]>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <[email protected]>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19 
20 #ifndef _LINUX_MEMCONTROL_H
21 #define _LINUX_MEMCONTROL_H
22 #include <linux/cgroup.h>
23 #include <linux/vm_event_item.h>
24 #include <linux/hardirq.h>
25 #include <linux/jump_label.h>
26 #include <linux/page_counter.h>
27 #include <linux/vmpressure.h>
28 #include <linux/eventfd.h>
29 #include <linux/mmzone.h>
30 #include <linux/writeback.h>
31 #include <linux/page-flags.h>
32 
33 struct mem_cgroup;
34 struct page;
35 struct mm_struct;
36 struct kmem_cache;
37 
38 /*
39  * The corresponding mem_cgroup_stat_names array is defined in mm/memcontrol.c;
40  * the two lists must be kept in sync with each other.
41  */
42 enum mem_cgroup_stat_index {
43 	/*
44 	 * For the memory controller as a whole, usage = pagecache + rss.
45 	 */
46 	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
47 	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
48 	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
49 	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
50 	MEM_CGROUP_STAT_DIRTY,          /* # of dirty pages in page cache */
51 	MEM_CGROUP_STAT_WRITEBACK,	/* # of pages under writeback */
52 	MEM_CGROUP_STAT_SWAP,		/* # of pages swapped out */
53 	MEM_CGROUP_STAT_NSTATS,
54 	/* default hierarchy stats */
55 	MEMCG_KERNEL_STACK_KB = MEM_CGROUP_STAT_NSTATS,
56 	MEMCG_SLAB_RECLAIMABLE,
57 	MEMCG_SLAB_UNRECLAIMABLE,
58 	MEMCG_SOCK,
59 	MEMCG_NR_STAT,
60 };
61 
62 struct mem_cgroup_reclaim_cookie {
63 	pg_data_t *pgdat;
64 	int priority;
65 	unsigned int generation;
66 };
67 
68 enum mem_cgroup_events_index {
69 	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
70 	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
71 	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
72 	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
73 	MEM_CGROUP_EVENTS_NSTATS,
74 	/* default hierarchy events */
75 	MEMCG_LOW = MEM_CGROUP_EVENTS_NSTATS,
76 	MEMCG_HIGH,
77 	MEMCG_MAX,
78 	MEMCG_OOM,
79 	MEMCG_NR_EVENTS,
80 };
81 
82 /*
83  * The per-memcg event counter is incremented at every pagein/pageout. With
84  * THP, it is incremented by the number of pages. This counter is used to
85  * trigger periodic events. This is straightforward and better than using
86  * jiffies etc. to handle periodic memcg events.
87  */
88 enum mem_cgroup_events_target {
89 	MEM_CGROUP_TARGET_THRESH,
90 	MEM_CGROUP_TARGET_SOFTLIMIT,
91 	MEM_CGROUP_TARGET_NUMAINFO,
92 	MEM_CGROUP_NTARGETS,
93 };
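/*
 * Illustrative sketch only (the real ratelimit helper lives in
 * mm/memcontrol.c): a target fires once the per-cpu page-event count in
 * struct mem_cgroup_stat_cpu (below) passes the stored target value,
 * roughly along these lines:
 *
 *   next = __this_cpu_read(memcg->stat->targets[MEM_CGROUP_TARGET_THRESH]);
 *   if ((long)(__this_cpu_read(memcg->stat->nr_page_events) - next) > 0)
 *           ... handle the event and advance the target ...
 */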
94 
95 #ifdef CONFIG_MEMCG
96 
97 #define MEM_CGROUP_ID_SHIFT	16
98 #define MEM_CGROUP_ID_MAX	USHRT_MAX
99 
100 struct mem_cgroup_id {
101 	int id;
102 	atomic_t ref;
103 };
104 
105 struct mem_cgroup_stat_cpu {
106 	long count[MEMCG_NR_STAT];
107 	unsigned long events[MEMCG_NR_EVENTS];
108 	unsigned long nr_page_events;
109 	unsigned long targets[MEM_CGROUP_NTARGETS];
110 };
111 
112 struct mem_cgroup_reclaim_iter {
113 	struct mem_cgroup *position;
114 	/* scan generation, increased every round-trip */
115 	unsigned int generation;
116 };
117 
118 /*
119  * per-node information in the memory controller.
120  */
121 struct mem_cgroup_per_node {
122 	struct lruvec		lruvec;
123 	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
124 
125 	struct mem_cgroup_reclaim_iter	iter[DEF_PRIORITY + 1];
126 
127 	struct rb_node		tree_node;	/* RB tree node */
128 	unsigned long		usage_in_excess;/* Set to the value by which */
129 						/* the soft limit is exceeded */
130 	bool			on_tree;
131 	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
132 						/* use container_of	   */
133 };
134 
135 struct mem_cgroup_threshold {
136 	struct eventfd_ctx *eventfd;
137 	unsigned long threshold;
138 };
139 
140 /* For threshold */
141 struct mem_cgroup_threshold_ary {
142 	/* Index of the threshold just below or equal to current usage. */
143 	int current_threshold;
144 	/* Size of entries[] */
145 	unsigned int size;
146 	/* Array of thresholds */
147 	struct mem_cgroup_threshold entries[0];
148 };
149 
150 struct mem_cgroup_thresholds {
151 	/* Primary thresholds array */
152 	struct mem_cgroup_threshold_ary *primary;
153 	/*
154 	 * Spare threshold array.
155 	 * This is needed to make mem_cgroup_unregister_event() "never fail".
156 	 * It must be able to store at least primary->size - 1 entries.
157 	 */
158 	struct mem_cgroup_threshold_ary *spare;
159 };
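/*
 * Rough sketch of the primary/spare dance (the real code lives in
 * mm/memcontrol.c); unregistering an event never needs to allocate:
 *
 *   new = thresholds->spare;
 *   ... copy the surviving entries from thresholds->primary into new ...
 *   rcu_assign_pointer(thresholds->primary, new);
 *   thresholds->spare = <the old primary>;
 */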
160 
161 enum memcg_kmem_state {
162 	KMEM_NONE,
163 	KMEM_ALLOCATED,
164 	KMEM_ONLINE,
165 };
166 
167 /*
168  * The memory controller data structure. The memory controller controls both
169  * page cache and RSS per cgroup. We would eventually like to provide
170  * statistics based on the scheme developed by Rik van Riel for clock-pro,
171  * to help administrators determine which knobs to tune.
172  */
173 struct mem_cgroup {
174 	struct cgroup_subsys_state css;
175 
176 	/* Private memcg ID. Used to ID objects that outlive the cgroup */
177 	struct mem_cgroup_id id;
178 
179 	/* Accounted resources */
180 	struct page_counter memory;
181 	struct page_counter swap;
182 
183 	/* Legacy consumer-oriented counters */
184 	struct page_counter memsw;
185 	struct page_counter kmem;
186 	struct page_counter tcpmem;
187 
188 	/* Normal memory consumption range */
189 	unsigned long low;
190 	unsigned long high;
191 
192 	/* Range enforcement for interrupt charges */
193 	struct work_struct high_work;
194 
195 	unsigned long soft_limit;
196 
197 	/* vmpressure notifications */
198 	struct vmpressure vmpressure;
199 
200 	/*
201 	 * Should the accounting and control be hierarchical, per subtree?
202 	 */
203 	bool use_hierarchy;
204 
205 	/* protected by memcg_oom_lock */
206 	bool		oom_lock;
207 	int		under_oom;
208 
209 	int	swappiness;
210 	/* OOM-Killer disable */
211 	int		oom_kill_disable;
212 
213 	/* handle for "memory.events" */
214 	struct cgroup_file events_file;
215 
216 	/* protect arrays of thresholds */
217 	struct mutex thresholds_lock;
218 
219 	/* thresholds for memory usage. RCU-protected */
220 	struct mem_cgroup_thresholds thresholds;
221 
222 	/* thresholds for mem+swap usage. RCU-protected */
223 	struct mem_cgroup_thresholds memsw_thresholds;
224 
225 	/* For oom notifier event fd */
226 	struct list_head oom_notify;
227 
228 	/*
229 	 * Should we move charges of a task when it is moved into this
230 	 * mem_cgroup? And what type of charges should we move?
231 	 */
232 	unsigned long move_charge_at_immigrate;
233 	/*
234 	 * Set to > 0 while pages under this cgroup are being moved to another cgroup.
235 	 */
236 	atomic_t		moving_account;
237 	/* taken only while moving_account > 0 */
238 	spinlock_t		move_lock;
239 	struct task_struct	*move_lock_task;
240 	unsigned long		move_lock_flags;
241 	/*
242 	 * percpu counter.
243 	 */
244 	struct mem_cgroup_stat_cpu __percpu *stat;
245 
246 	unsigned long		socket_pressure;
247 
248 	/* Legacy tcp memory accounting */
249 	bool			tcpmem_active;
250 	int			tcpmem_pressure;
251 
252 #ifndef CONFIG_SLOB
253 	/* Index in the kmem_cache->memcg_params.memcg_caches array */
254 	int kmemcg_id;
255 	enum memcg_kmem_state kmem_state;
256 	struct list_head kmem_caches;
257 #endif
258 
259 	int last_scanned_node;
260 #if MAX_NUMNODES > 1
261 	nodemask_t	scan_nodes;
262 	atomic_t	numainfo_events;
263 	atomic_t	numainfo_updating;
264 #endif
265 
266 #ifdef CONFIG_CGROUP_WRITEBACK
267 	struct list_head cgwb_list;
268 	struct wb_domain cgwb_domain;
269 #endif
270 
271 	/* List of events which userspace wants to receive */
272 	struct list_head event_list;
273 	spinlock_t event_list_lock;
274 
275 	struct mem_cgroup_per_node *nodeinfo[0];
276 	/* WARNING: nodeinfo must be the last member here */
277 };
278 
279 extern struct mem_cgroup *root_mem_cgroup;
280 
281 static inline bool mem_cgroup_disabled(void)
282 {
283 	return !cgroup_subsys_enabled(memory_cgrp_subsys);
284 }
285 
286 /**
287  * mem_cgroup_events - count memory events against a cgroup
288  * @memcg: the memory cgroup
289  * @idx: the event index
290  * @nr: the number of events to account for
291  */
292 static inline void mem_cgroup_events(struct mem_cgroup *memcg,
293 		       enum mem_cgroup_events_index idx,
294 		       unsigned int nr)
295 {
296 	this_cpu_add(memcg->stat->events[idx], nr);
297 	cgroup_file_notify(&memcg->events_file);
298 }
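/*
 * Example (hypothetical call site): record that a cgroup hit its limit.
 *
 *   mem_cgroup_events(memcg, MEMCG_MAX, 1);
 */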
299 
300 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
301 
302 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
303 			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
304 			  bool compound);
305 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
306 			      bool lrucare, bool compound);
307 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
308 		bool compound);
309 void mem_cgroup_uncharge(struct page *page);
310 void mem_cgroup_uncharge_list(struct list_head *page_list);
311 
312 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
313 
314 static inline struct mem_cgroup_per_node *
315 mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
316 {
317 	return memcg->nodeinfo[nid];
318 }
319 
320 /**
321  * mem_cgroup_lruvec - get the lru list vector for a node or a memcg & node
322  * @pgdat: node of the wanted lruvec
323  * @memcg: memcg of the wanted lruvec
324  *
325  * Returns the lru list vector holding pages for a given @pgdat or a given
326  * @memcg and @pgdat combination. This can be the node lruvec if the memory
327  * controller is disabled.
328  */
329 static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
330 				struct mem_cgroup *memcg)
331 {
332 	struct mem_cgroup_per_node *mz;
333 	struct lruvec *lruvec;
334 
335 	if (mem_cgroup_disabled()) {
336 		lruvec = node_lruvec(pgdat);
337 		goto out;
338 	}
339 
340 	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
341 	lruvec = &mz->lruvec;
342 out:
343 	/*
344 	 * Since a node can be onlined after the mem_cgroup was created,
345 	 * we have to be prepared to initialize lruvec->pgdat here;
346 	 * and if offlined then reonlined, we need to reinitialize it.
347 	 */
348 	if (unlikely(lruvec->pgdat != pgdat))
349 		lruvec->pgdat = pgdat;
350 	return lruvec;
351 }
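/*
 * Example (sketch): per-node reclaim resolves the lruvec it will scan
 * roughly like this:
 *
 *   struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
 *   nr = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_FILE);
 */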
352 
353 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
354 
355 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
356 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
357 
358 static inline
359 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
360 	return css ? container_of(css, struct mem_cgroup, css) : NULL;
361 }
362 
363 #define mem_cgroup_from_counter(counter, member)	\
364 	container_of(counter, struct mem_cgroup, member)
365 
366 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
367 				   struct mem_cgroup *,
368 				   struct mem_cgroup_reclaim_cookie *);
369 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
370 int mem_cgroup_scan_tasks(struct mem_cgroup *,
371 			  int (*)(struct task_struct *, void *), void *);
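/*
 * Sketch of the usual hierarchy walk (reclaim does something like this;
 * "reclaim" is a struct mem_cgroup_reclaim_cookie, and passing NULL
 * instead is fine for a simple full walk):
 *
 *   memcg = mem_cgroup_iter(root, NULL, &reclaim);
 *   do {
 *           ... operate on memcg ...
 *   } while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
 */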
372 
373 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
374 {
375 	if (mem_cgroup_disabled())
376 		return 0;
377 
378 	return memcg->id.id;
379 }
380 struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
381 
382 /**
383  * parent_mem_cgroup - find the accounting parent of a memcg
384  * @memcg: memcg whose parent to find
385  *
386  * Returns the parent memcg, or NULL if this is the root or the memory
387  * controller is in legacy no-hierarchy mode.
388  */
389 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
390 {
391 	if (!memcg->memory.parent)
392 		return NULL;
393 	return mem_cgroup_from_counter(memcg->memory.parent, memory);
394 }
395 
396 static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
397 			      struct mem_cgroup *root)
398 {
399 	if (root == memcg)
400 		return true;
401 	if (!root->use_hierarchy)
402 		return false;
403 	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
404 }
405 
406 static inline bool mm_match_cgroup(struct mm_struct *mm,
407 				   struct mem_cgroup *memcg)
408 {
409 	struct mem_cgroup *task_memcg;
410 	bool match = false;
411 
412 	rcu_read_lock();
413 	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
414 	if (task_memcg)
415 		match = mem_cgroup_is_descendant(task_memcg, memcg);
416 	rcu_read_unlock();
417 	return match;
418 }
419 
420 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
421 ino_t page_cgroup_ino(struct page *page);
422 
423 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
424 {
425 	if (mem_cgroup_disabled())
426 		return true;
427 	return !!(memcg->css.flags & CSS_ONLINE);
428 }
429 
430 /*
431  * For memory reclaim.
432  */
433 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
434 
435 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
436 		int zid, int nr_pages);
437 
438 unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
439 					   int nid, unsigned int lru_mask);
440 
441 static inline
442 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
443 {
444 	struct mem_cgroup_per_node *mz;
445 	unsigned long nr_pages = 0;
446 	int zid;
447 
448 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
449 	for (zid = 0; zid < MAX_NR_ZONES; zid++)
450 		nr_pages += mz->lru_zone_size[zid][lru];
451 	return nr_pages;
452 }
453 
454 static inline
455 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
456 		enum lru_list lru, int zone_idx)
457 {
458 	struct mem_cgroup_per_node *mz;
459 
460 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
461 	return mz->lru_zone_size[zone_idx][lru];
462 }
463 
464 void mem_cgroup_handle_over_high(void);
465 
466 unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg);
467 
468 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
469 				struct task_struct *p);
470 
471 static inline void mem_cgroup_oom_enable(void)
472 {
473 	WARN_ON(current->memcg_may_oom);
474 	current->memcg_may_oom = 1;
475 }
476 
477 static inline void mem_cgroup_oom_disable(void)
478 {
479 	WARN_ON(!current->memcg_may_oom);
480 	current->memcg_may_oom = 0;
481 }
482 
483 static inline bool task_in_memcg_oom(struct task_struct *p)
484 {
485 	return p->memcg_in_oom;
486 }
487 
488 bool mem_cgroup_oom_synchronize(bool wait);
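/*
 * Sketch of how the page fault path is expected to use the OOM hooks
 * above (the real callers live in mm/memory.c and mm/oom_kill.c):
 *
 *   mem_cgroup_oom_enable();
 *   ret = ... run the fault handler, which may record an OOM context ...
 *   mem_cgroup_oom_disable();
 *   if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
 *           mem_cgroup_oom_synchronize(false);
 */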
489 
490 #ifdef CONFIG_MEMCG_SWAP
491 extern int do_swap_account;
492 #endif
493 
494 void lock_page_memcg(struct page *page);
495 void unlock_page_memcg(struct page *page);
496 
497 /**
498  * mem_cgroup_update_page_stat - update page state statistics
499  * @page: the page
500  * @idx: page state item to account
501  * @val: number of pages (positive or negative)
502  *
503  * The @page must be locked or the caller must use lock_page_memcg()
504  * to prevent double accounting when the page is concurrently being
505  * moved to another memcg:
506  *
507  *   lock_page(page) or lock_page_memcg(page)
508  *   if (TestClearPageState(page))
509  *     mem_cgroup_update_page_stat(page, state, -1);
510  *   unlock_page(page) or unlock_page_memcg(page)
511  */
512 static inline void mem_cgroup_update_page_stat(struct page *page,
513 				 enum mem_cgroup_stat_index idx, int val)
514 {
515 	VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page)));
516 
517 	if (page->mem_cgroup)
518 		this_cpu_add(page->mem_cgroup->stat->count[idx], val);
519 }
520 
521 static inline void mem_cgroup_inc_page_stat(struct page *page,
522 					    enum mem_cgroup_stat_index idx)
523 {
524 	mem_cgroup_update_page_stat(page, idx, 1);
525 }
526 
527 static inline void mem_cgroup_dec_page_stat(struct page *page,
528 					    enum mem_cgroup_stat_index idx)
529 {
530 	mem_cgroup_update_page_stat(page, idx, -1);
531 }
532 
533 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
534 						gfp_t gfp_mask,
535 						unsigned long *total_scanned);
536 
537 static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
538 					     enum vm_event_item idx)
539 {
540 	struct mem_cgroup *memcg;
541 
542 	if (mem_cgroup_disabled())
543 		return;
544 
545 	rcu_read_lock();
546 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
547 	if (unlikely(!memcg))
548 		goto out;
549 
550 	switch (idx) {
551 	case PGFAULT:
552 		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
553 		break;
554 	case PGMAJFAULT:
555 		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
556 		break;
557 	default:
558 		BUG();
559 	}
560 out:
561 	rcu_read_unlock();
562 }
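/*
 * Example (hypothetical call site): fault handlers that count a global
 * major fault typically mirror it into the memcg statistics:
 *
 *   count_vm_event(PGMAJFAULT);
 *   mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
 */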
563 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
564 void mem_cgroup_split_huge_fixup(struct page *head);
565 #endif
566 
567 #else /* CONFIG_MEMCG */
568 
569 #define MEM_CGROUP_ID_SHIFT	0
570 #define MEM_CGROUP_ID_MAX	0
571 
572 struct mem_cgroup;
573 
574 static inline bool mem_cgroup_disabled(void)
575 {
576 	return true;
577 }
578 
579 static inline void mem_cgroup_events(struct mem_cgroup *memcg,
580 				     enum mem_cgroup_events_index idx,
581 				     unsigned int nr)
582 {
583 }
584 
585 static inline bool mem_cgroup_low(struct mem_cgroup *root,
586 				  struct mem_cgroup *memcg)
587 {
588 	return false;
589 }
590 
591 static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
592 					gfp_t gfp_mask,
593 					struct mem_cgroup **memcgp,
594 					bool compound)
595 {
596 	*memcgp = NULL;
597 	return 0;
598 }
599 
600 static inline void mem_cgroup_commit_charge(struct page *page,
601 					    struct mem_cgroup *memcg,
602 					    bool lrucare, bool compound)
603 {
604 }
605 
606 static inline void mem_cgroup_cancel_charge(struct page *page,
607 					    struct mem_cgroup *memcg,
608 					    bool compound)
609 {
610 }
611 
612 static inline void mem_cgroup_uncharge(struct page *page)
613 {
614 }
615 
616 static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
617 {
618 }
619 
620 static inline void mem_cgroup_migrate(struct page *old, struct page *new)
621 {
622 }
623 
624 static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
625 				struct mem_cgroup *memcg)
626 {
627 	return node_lruvec(pgdat);
628 }
629 
630 static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
631 						    struct pglist_data *pgdat)
632 {
633 	return &pgdat->lruvec;
634 }
635 
636 static inline bool mm_match_cgroup(struct mm_struct *mm,
637 		struct mem_cgroup *memcg)
638 {
639 	return true;
640 }
641 
642 static inline bool task_in_mem_cgroup(struct task_struct *task,
643 				      const struct mem_cgroup *memcg)
644 {
645 	return true;
646 }
647 
648 static inline struct mem_cgroup *
649 mem_cgroup_iter(struct mem_cgroup *root,
650 		struct mem_cgroup *prev,
651 		struct mem_cgroup_reclaim_cookie *reclaim)
652 {
653 	return NULL;
654 }
655 
656 static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
657 					 struct mem_cgroup *prev)
658 {
659 }
660 
661 static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
662 		int (*fn)(struct task_struct *, void *), void *arg)
663 {
664 	return 0;
665 }
666 
667 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
668 {
669 	return 0;
670 }
671 
672 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
673 {
674 	WARN_ON_ONCE(id);
675 	/* XXX: This should always return root_mem_cgroup */
676 	return NULL;
677 }
678 
679 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
680 {
681 	return true;
682 }
683 
684 static inline unsigned long
685 mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
686 {
687 	return 0;
688 }
689 static inline
690 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
691 		enum lru_list lru, int zone_idx)
692 {
693 	return 0;
694 }
695 
696 static inline unsigned long
697 mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
698 			     int nid, unsigned int lru_mask)
699 {
700 	return 0;
701 }
702 
703 static inline unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
704 {
705 	return 0;
706 }
707 
708 static inline void
709 mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
710 {
711 }
712 
713 static inline void lock_page_memcg(struct page *page)
714 {
715 }
716 
717 static inline void unlock_page_memcg(struct page *page)
718 {
719 }
720 
721 static inline void mem_cgroup_handle_over_high(void)
722 {
723 }
724 
725 static inline void mem_cgroup_oom_enable(void)
726 {
727 }
728 
729 static inline void mem_cgroup_oom_disable(void)
730 {
731 }
732 
733 static inline bool task_in_memcg_oom(struct task_struct *p)
734 {
735 	return false;
736 }
737 
738 static inline bool mem_cgroup_oom_synchronize(bool wait)
739 {
740 	return false;
741 }
742 
743 static inline void mem_cgroup_inc_page_stat(struct page *page,
744 					    enum mem_cgroup_stat_index idx)
745 {
746 }
747 
748 static inline void mem_cgroup_dec_page_stat(struct page *page,
749 					    enum mem_cgroup_stat_index idx)
750 {
751 }
752 
753 static inline
754 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
755 					    gfp_t gfp_mask,
756 					    unsigned long *total_scanned)
757 {
758 	return 0;
759 }
760 
761 static inline void mem_cgroup_split_huge_fixup(struct page *head)
762 {
763 }
764 
765 static inline
766 void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
767 {
768 }
769 #endif /* CONFIG_MEMCG */
770 
771 #ifdef CONFIG_CGROUP_WRITEBACK
772 
773 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
774 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
775 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
776 			 unsigned long *pheadroom, unsigned long *pdirty,
777 			 unsigned long *pwriteback);
778 
779 #else	/* CONFIG_CGROUP_WRITEBACK */
780 
781 static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
782 {
783 	return NULL;
784 }
785 
786 static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
787 				       unsigned long *pfilepages,
788 				       unsigned long *pheadroom,
789 				       unsigned long *pdirty,
790 				       unsigned long *pwriteback)
791 {
792 }
793 
794 #endif	/* CONFIG_CGROUP_WRITEBACK */
795 
796 struct sock;
797 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
798 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
799 #ifdef CONFIG_MEMCG
800 extern struct static_key_false memcg_sockets_enabled_key;
801 #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
802 void mem_cgroup_sk_alloc(struct sock *sk);
803 void mem_cgroup_sk_free(struct sock *sk);
804 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
805 {
806 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
807 		return true;
808 	do {
809 		if (time_before(jiffies, memcg->socket_pressure))
810 			return true;
811 	} while ((memcg = parent_mem_cgroup(memcg)));
812 	return false;
813 }
814 #else
815 #define mem_cgroup_sockets_enabled 0
816 static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
817 static inline void mem_cgroup_sk_free(struct sock *sk) { };
818 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
819 {
820 	return false;
821 }
822 #endif
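/*
 * Sketch of the expected socket-memory usage (the real callers live in
 * net/core/sock.c; the locals shown here are illustrative):
 *
 *   if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *       !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
 *           ... back off, the memcg is over its limit ...
 *   ...
 *   if (mem_cgroup_sockets_enabled && sk->sk_memcg)
 *           mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
 */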
823 
824 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
825 void memcg_kmem_put_cache(struct kmem_cache *cachep);
826 int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
827 			    struct mem_cgroup *memcg);
828 int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
829 void memcg_kmem_uncharge(struct page *page, int order);
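/*
 * Sketch of the expected charge/uncharge pairing for kmem pages (the
 * real callers are the page and slab allocators; the __GFP_ACCOUNT test
 * is illustrative):
 *
 *   if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) &&
 *       memcg_kmem_charge(page, gfp, order))
 *           ... fail the allocation ...
 *   ... and on the free path:
 *   memcg_kmem_uncharge(page, order);
 */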
830 
831 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
832 extern struct static_key_false memcg_kmem_enabled_key;
833 extern struct workqueue_struct *memcg_kmem_cache_wq;
834 
835 extern int memcg_nr_cache_ids;
836 void memcg_get_cache_ids(void);
837 void memcg_put_cache_ids(void);
838 
839 /*
840  * Helper macro to loop through all memcg-specific caches. Callers must still
841  * check if the cache is valid (it is either valid or NULL).
842  * The slab_mutex must be held when looping through those caches.
843  */
844 #define for_each_memcg_cache_index(_idx)	\
845 	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
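/*
 * Example (sketch), mirroring how the slab code walks the per-memcg
 * clones of a root cache (cache_from_memcg_idx() lives in mm/slab.h):
 *
 *   mutex_lock(&slab_mutex);
 *   for_each_memcg_cache_index(i) {
 *           c = cache_from_memcg_idx(root_cache, i);
 *           if (!c)
 *                   continue;
 *           ... operate on c ...
 *   }
 *   mutex_unlock(&slab_mutex);
 */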
846 
847 static inline bool memcg_kmem_enabled(void)
848 {
849 	return static_branch_unlikely(&memcg_kmem_enabled_key);
850 }
851 
852 /*
853  * Helper for accessing a memcg's index. It will be used as an index in the
854  * child cache array in kmem_cache, and also to derive its name. This function
855  * will return -1 when this is not a kmem-limited memcg.
856  */
857 static inline int memcg_cache_id(struct mem_cgroup *memcg)
858 {
859 	return memcg ? memcg->kmemcg_id : -1;
860 }
861 
862 /**
863  * memcg_kmem_update_page_stat - update kmem page state statistics
864  * @page: the page
865  * @idx: page state item to account
866  * @val: number of pages (positive or negative)
867  */
868 static inline void memcg_kmem_update_page_stat(struct page *page,
869 				enum mem_cgroup_stat_index idx, int val)
870 {
871 	if (memcg_kmem_enabled() && page->mem_cgroup)
872 		this_cpu_add(page->mem_cgroup->stat->count[idx], val);
873 }
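/*
 * Example (sketch): kernel-stack accounting uses this to track per-memcg
 * stack usage in kilobytes, roughly:
 *
 *   memcg_kmem_update_page_stat(virt_to_page(stack), MEMCG_KERNEL_STACK_KB,
 *				 THREAD_SIZE / 1024);
 */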
874 
875 #else
876 #define for_each_memcg_cache_index(_idx)	\
877 	for (; NULL; )
878 
879 static inline bool memcg_kmem_enabled(void)
880 {
881 	return false;
882 }
883 
884 static inline int memcg_cache_id(struct mem_cgroup *memcg)
885 {
886 	return -1;
887 }
888 
889 static inline void memcg_get_cache_ids(void)
890 {
891 }
892 
893 static inline void memcg_put_cache_ids(void)
894 {
895 }
896 
897 static inline void memcg_kmem_update_page_stat(struct page *page,
898 				enum mem_cgroup_stat_index idx, int val)
899 {
900 }
901 #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
902 
903 #endif /* _LINUX_MEMCONTROL_H */
904