/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>

struct mem_cgroup;

/* list_lru_walk_cb must always return one of these values */
enum lru_status {
	LRU_REMOVED,		/* item removed from list */
	LRU_REMOVED_RETRY,	/* item removed, but lock has been
				   dropped and reacquired */
	LRU_ROTATE,		/* item referenced, give another pass */
	LRU_SKIP,		/* item cannot be locked, skip */
	LRU_RETRY,		/* item not freeable. May drop the lock
				   internally, but has to return locked. */
};

struct list_lru_one {
	struct list_head	list;
	/* may become negative during memcg reparenting */
	long			nr_items;
};

struct list_lru_memcg {
	struct rcu_head		rcu;
	/* array of per cgroup lists, indexed by memcg_cache_id */
	struct list_lru_one	*lru[0];
};

struct list_lru_node {
	/* protects all lists on the node, including per cgroup */
	spinlock_t		lock;
	/* global list, used for the root cgroup in cgroup aware lrus */
	struct list_lru_one	lru;
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
	/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
	struct list_lru_memcg	__rcu *memcg_lrus;
#endif
	long			nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
	struct list_lru_node	*node;
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
	struct list_head	list;
#endif
};

void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key);

#define list_lru_init(lru)		__list_lru_init((lru), false, NULL)
#define list_lru_init_key(lru, key)	__list_lru_init((lru), false, (key))
#define list_lru_init_memcg(lru)	__list_lru_init((lru), true, NULL)

int memcg_update_all_list_lrus(int num_memcgs);
void memcg_drain_all_list_lrus(int src_idx, int dst_idx);

/**
 * list_lru_add: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function returns doing
 * nothing. The caller therefore does not need to track whether the element
 * is already on the list, and may update it lazily. Note however that this
 * holds for *a* list, not *this* list: if the caller's objects can sit on
 * more than one type of list, it is up to the caller to fully remove the
 * item from the previous list (with list_lru_del(), for instance) before
 * moving it to @lru.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);
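
/*
 * Example: the lazy-insertion pattern described above. This is an
 * illustrative sketch only; the object type (struct demo_obj), the lru
 * instance (demo_lru) and the helper (demo_obj_put) are hypothetical
 * names invented for the example, not part of this API.
 *
 *	struct demo_obj {
 *		struct list_head lru_link;	// hooks the object into the lru
 *		atomic_t refcount;
 *	};
 *
 *	static struct list_lru demo_lru;	// set up with list_lru_init()
 *
 *	static void demo_obj_put(struct demo_obj *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refcount)) {
 *			// If the item already sits on the lru, this is a
 *			// no-op returning false, so the caller keeps no
 *			// extra "am I on the list?" state.
 *			list_lru_add(&demo_lru, &obj->lru_link);
 *		}
 *	}
 */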
/**
 * list_lru_del: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add() in terms of list
 * manipulation. The comments about an element already belonging to a
 * list are also valid for list_lru_del().
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);

/**
 * list_lru_count_one: return the number of objects currently held by @lru
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 * @memcg: the cgroup to count from.
 *
 * Always returns a non-negative number, 0 for empty lists. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg);
unsigned long list_lru_count_node(struct list_lru *lru, int nid);

static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
						  struct shrink_control *sc)
{
	return list_lru_count_one(lru, sc->nid, sc->memcg);
}

static inline unsigned long list_lru_count(struct list_lru *lru)
{
	long count = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY)
		count += list_lru_count_node(lru, nid);

	return count;
}

void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head);

typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
		struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
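
/*
 * Example: a minimal isolate callback, sketched under the assumption of the
 * hypothetical struct demo_obj above; demo_isolate and demo_obj_is_busy are
 * likewise invented names. It demonstrates the common pattern of moving
 * freeable items to a private dispose list and freeing them after the walk,
 * outside the lru spinlock.
 *
 *	static enum lru_status demo_isolate(struct list_head *item,
 *			struct list_lru_one *list, spinlock_t *lock,
 *			void *cb_arg)
 *	{
 *		struct list_head *dispose = cb_arg;
 *		struct demo_obj *obj = container_of(item, struct demo_obj,
 *						    lru_link);
 *
 *		if (demo_obj_is_busy(obj))
 *			return LRU_ROTATE;	// referenced: give another pass
 *
 *		// Take the item off the lru and collect it for disposal.
 *		list_lru_isolate_move(list, item, dispose);
 *		return LRU_REMOVED;
 *	}
 */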
/**
 * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do
 *	with the item currently being scanned.
 * @cb_arg: opaque argument that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular list_lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and a caller-provided opaque argument. The @isolate callback can
 * choose to drop the lock internally, but *must* return with the lock held.
 * The callback will return an enum lru_status telling the list_lru
 * infrastructure what to do with the object being scanned.
 *
 * Please note that @nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk_one(struct list_lru *lru,
				int nid, struct mem_cgroup *memcg,
				list_lru_walk_cb isolate, void *cb_arg,
				unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk);

static inline unsigned long
list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
		     list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
				 &sc->nr_to_scan);
}

static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
	      void *cb_arg, unsigned long nr_to_walk)
{
	long isolated = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		isolated += list_lru_walk_node(lru, nid, isolate,
					       cb_arg, &nr_to_walk);
		if (nr_to_walk <= 0)
			break;
	}
	return isolated;
}
#endif /* _LRU_LIST_H */
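
/*
 * Example: wiring a list_lru into a shrinker. An illustrative sketch only:
 * demo_lru and demo_isolate reuse the hypothetical names from the examples
 * above, while the struct shrinker callbacks come from <linux/shrinker.h>.
 * list_lru_shrink_count() and list_lru_shrink_walk() pull the node id,
 * memcg and scan budget straight out of the shrink_control.
 *
 *	static unsigned long demo_count(struct shrinker *s,
 *					struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_count(&demo_lru, sc);
 *	}
 *
 *	static unsigned long demo_scan(struct shrinker *s,
 *				       struct shrink_control *sc)
 *	{
 *		LIST_HEAD(dispose);
 *		unsigned long freed;
 *
 *		freed = list_lru_shrink_walk(&demo_lru, sc, demo_isolate,
 *					     &dispose);
 *		// Free the objects collected on the dispose list here,
 *		// outside the lru spinlock.
 *		return freed;
 *	}
 *
 *	static struct shrinker demo_shrinker = {
 *		.count_objects	= demo_count,
 *		.scan_objects	= demo_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *		.flags		= SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
 *	};
 *
 * The shrinker would then be activated with register_shrinker(&demo_shrinker)
 * after the lru has been set up with list_lru_init_memcg().
 */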