/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>
#include <linux/xarray.h>

struct mem_cgroup;

/* A list_lru_walk_cb must always return one of these values. */
enum lru_status {
	LRU_REMOVED,		/* item removed from list */
	LRU_REMOVED_RETRY,	/* item removed, but lock has been
				   dropped and reacquired */
	LRU_ROTATE,		/* item referenced, give another pass */
	LRU_SKIP,		/* item cannot be locked, skip */
	LRU_RETRY,		/* item not freeable. May drop the lock
				   internally, but has to return locked. */
	LRU_STOP,		/* stop lru list walking. May drop the lock
				   internally, but has to return locked. */
};

struct list_lru_one {
	struct list_head	list;
	/* may become negative during memcg reparenting */
	long			nr_items;
	/* protects all fields above */
	spinlock_t		lock;
};

struct list_lru_memcg {
	struct rcu_head		rcu;
	/* array of per cgroup per node lists, indexed by node id */
	struct list_lru_one	node[];
};

struct list_lru_node {
	/* global list, used for the root cgroup in cgroup aware lrus */
	struct list_lru_one	lru;
	atomic_long_t		nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
	struct list_lru_node	*node;
#ifdef CONFIG_MEMCG
	struct list_head	list;
	int			shrinker_id;
	bool			memcg_aware;
	struct xarray		xa;
#endif
#ifdef CONFIG_LOCKDEP
	struct lock_class_key	*key;
#endif
};

void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct shrinker *shrinker);

#define list_lru_init(lru)				\
	__list_lru_init((lru), false, NULL)
#define list_lru_init_memcg(lru, shrinker)		\
	__list_lru_init((lru), true, shrinker)

static inline int list_lru_init_memcg_key(struct list_lru *lru, struct shrinker *shrinker,
					  struct lock_class_key *key)
{
#ifdef CONFIG_LOCKDEP
	lru->key = key;
#endif
	return list_lru_init_memcg(lru, shrinker);
}
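
/*
 * Usage sketch (illustrative, not part of this header): initializing a
 * memcg-aware list_lru paired with a shrinker allocated through the
 * shrinker_alloc()/shrinker_register() API.  The names "my_lru",
 * "my_lru_key", "my_count" and "my_scan" are hypothetical placeholders.
 *
 *	static struct list_lru my_lru;
 *	static struct lock_class_key my_lru_key;
 *
 *	static int __init my_cache_init(void)
 *	{
 *		struct shrinker *s;
 *
 *		s = shrinker_alloc(SHRINKER_MEMCG_AWARE | SHRINKER_NUMA_AWARE,
 *				   "my-cache");
 *		if (!s)
 *			return -ENOMEM;
 *		if (list_lru_init_memcg_key(&my_lru, s, &my_lru_key)) {
 *			shrinker_free(s);
 *			return -ENOMEM;
 *		}
 *		s->count_objects = my_count;
 *		s->scan_objects = my_scan;
 *		shrinker_register(s);
 *		return 0;
 *	}
 */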

int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
			 gfp_t gfp);
void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent);

/**
 * list_lru_add: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 * @nid: the node id of the sublist to add the item to.
 * @memcg: the cgroup of the sublist to add the item to.
 *
 * If the element is already part of a list, this function does nothing.
 * The caller therefore does not need to keep state about whether the
 * element is already on a list and may update it lazily. Note, however,
 * that this holds for *a* list, not *this* list: if the caller organizes
 * its elements so that they can be on more than one type of list, it is
 * up to the caller to fully remove the item from the previous list (with
 * list_lru_del(), for instance) before moving it to @lru.
 *
 * Return: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
		    struct mem_cgroup *memcg);
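
/*
 * Illustrative sketch (hypothetical caller): adding an object to the
 * sublist matching where it was allocated.  "my_lru", "struct my_obj"
 * (with an embedded list_head "lru") and "nr_cached" are assumptions;
 * the node and memcg lookups roughly mirror what list_lru_add_obj()
 * does internally for slab objects.
 *
 *	int nid = page_to_nid(virt_to_page(obj));
 *	struct mem_cgroup *memcg = mem_cgroup_from_slab_obj(obj);
 *
 *	if (list_lru_add(&my_lru, &obj->lru, nid, memcg))
 *		nr_cached++;
 */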

/**
 * list_lru_add_obj: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 *
 * This function is similar to list_lru_add(), but the NUMA node and the
 * memcg of the sublist are determined from the @item list_head. This
 * assumption is valid for slab object LRUs such as dentries, inodes, etc.
 *
 * Return: true if the list was updated, false otherwise
 */
bool list_lru_add_obj(struct list_lru *lru, struct list_head *item);

/**
 * list_lru_del: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 * @nid: the node id of the sublist to delete the item from.
 * @memcg: the cgroup of the sublist to delete the item from.
 *
 * This function works analogously to list_lru_add() in terms of list
 * manipulation. The comments about an element already belonging to
 * a list also apply to list_lru_del().
 *
 * Return: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
		    struct mem_cgroup *memcg);

/**
 * list_lru_del_obj: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function is similar to list_lru_del(), but the NUMA node and the
 * memcg of the sublist are determined from the @item list_head. This
 * assumption is valid for slab object LRUs such as dentries, inodes, etc.
 *
 * Return: true if the list was updated, false otherwise.
 */
bool list_lru_del_obj(struct list_lru *lru, struct list_head *item);
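
/*
 * Illustrative sketch: an object with an embedded list_head is parked on
 * the LRU while unused and pulled back off on reuse.  "struct my_obj",
 * "my_lru" and both helpers are hypothetical; the object must come from
 * a slab allocation for the implicit node/memcg lookup to work.
 *
 *	struct my_obj {
 *		struct list_head lru;
 *	};
 *
 *	static void my_obj_park(struct my_obj *obj)
 *	{
 *		list_lru_add_obj(&my_lru, &obj->lru);
 *	}
 *
 *	static void my_obj_reuse(struct my_obj *obj)
 *	{
 *		list_lru_del_obj(&my_lru, &obj->lru);
 *	}
 */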

/**
 * list_lru_count_one: return the number of objects held by one sublist of @lru
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 * @memcg: the cgroup to count from.
 *
 * There is no guarantee that the list is not updated while the count is being
 * computed. Callers that want such a guarantee need to provide an outer lock.
 *
 * Return: 0 for empty lists, otherwise the number of objects
 * currently held by the @nid/@memcg sublist of @lru.
 */
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg);
unsigned long list_lru_count_node(struct list_lru *lru, int nid);

static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
						  struct shrink_control *sc)
{
	return list_lru_count_one(lru, sc->nid, sc->memcg);
}
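
/*
 * Illustrative sketch: a shrinker ->count_objects callback built on
 * list_lru_shrink_count().  "my_lru" is hypothetical; returning
 * SHRINK_EMPTY lets the shrinker core skip memcgs with nothing cached.
 *
 *	static unsigned long my_count(struct shrinker *s,
 *				      struct shrink_control *sc)
 *	{
 *		unsigned long nr = list_lru_shrink_count(&my_lru, sc);
 *
 *		return nr ? nr : SHRINK_EMPTY;
 *	}
 */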

static inline unsigned long list_lru_count(struct list_lru *lru)
{
	long count = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY)
		count += list_lru_count_node(lru, nid);

	return count;
}

void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head);

typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
		struct list_lru_one *list, void *cb_arg);
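
/*
 * Illustrative sketch of a walk callback.  "struct my_obj" with its own
 * spinlock and "referenced" bit is hypothetical; the callback is invoked
 * with the sublist lock held and must return with it held.
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  struct list_lru_one *list,
 *					  void *cb_arg)
 *	{
 *		struct list_head *dispose = cb_arg;
 *		struct my_obj *obj = container_of(item, struct my_obj, lru);
 *
 *		if (!spin_trylock(&obj->lock))
 *			return LRU_SKIP;
 *		if (obj->referenced) {
 *			obj->referenced = false;
 *			spin_unlock(&obj->lock);
 *			return LRU_ROTATE;
 *		}
 *		list_lru_isolate_move(list, item, dispose);
 *		spin_unlock(&obj->lock);
 *		return LRU_REMOVED;
 *	}
 */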

/**
 * list_lru_walk_one: walk a @lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque argument that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular @lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and a caller-provided opaque argument. The @isolate callback can
 * choose to drop the lock internally, but *must* return with the lock held.
 * The callback will return an enum lru_status telling the @lru infrastructure
 * what to do with the object being scanned.
 *
 * Please note that @nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk_one(struct list_lru *lru,
				int nid, struct mem_cgroup *memcg,
				list_lru_walk_cb isolate, void *cb_arg,
				unsigned long *nr_to_walk);
/**
 * list_lru_walk_one_irq: walk a @lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque argument that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * Same as list_lru_walk_one() except that the spinlock is acquired with
 * spin_lock_irq().
 */
unsigned long list_lru_walk_one_irq(struct list_lru *lru,
				    int nid, struct mem_cgroup *memcg,
				    list_lru_walk_cb isolate, void *cb_arg,
				    unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk);

static inline unsigned long
list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
		     list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
				 &sc->nr_to_scan);
}

static inline unsigned long
list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc,
			 list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,
				     &sc->nr_to_scan);
}
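
/*
 * Illustrative sketch: a shrinker ->scan_objects callback built on
 * list_lru_shrink_walk(), reusing the hypothetical my_isolate() callback
 * sketched above to collect victims on a private dispose list and free
 * them outside the sublist lock.
 *
 *	static unsigned long my_scan(struct shrinker *s,
 *				     struct shrink_control *sc)
 *	{
 *		LIST_HEAD(dispose);
 *		unsigned long freed;
 *
 *		freed = list_lru_shrink_walk(&my_lru, sc, my_isolate,
 *					     &dispose);
 *		while (!list_empty(&dispose)) {
 *			struct my_obj *obj;
 *
 *			obj = list_first_entry(&dispose, struct my_obj, lru);
 *			list_del_init(&obj->lru);
 *			kfree(obj);
 *		}
 *		return freed;
 *	}
 */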

static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
	      void *cb_arg, unsigned long nr_to_walk)
{
	long isolated = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		isolated += list_lru_walk_node(lru, nid, isolate,
					       cb_arg, &nr_to_walk);
		if (nr_to_walk <= 0)
			break;
	}
	return isolated;
}
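
/*
 * Illustrative sketch: draining the whole lru at teardown.  "my_lru",
 * "my_isolate" and "my_shrinker" are the hypothetical names used in the
 * sketches above; the shrinker is freed first so no concurrent walk can
 * race with the destruction.
 *
 *	static void my_cache_exit(void)
 *	{
 *		LIST_HEAD(dispose);
 *
 *		shrinker_free(my_shrinker);
 *		list_lru_walk(&my_lru, my_isolate, &dispose, ULONG_MAX);
 *		... free everything left on "dispose" ...
 *		list_lru_destroy(&my_lru);
 *	}
 */
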
#endif /* _LRU_LIST_H */