/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>

struct mem_cgroup;

/* A list_lru_walk_cb callback must always return one of these values. */
enum lru_status {
	LRU_REMOVED,		/* item removed from list */
	LRU_REMOVED_RETRY,	/* item removed, but lock has been
				   dropped and reacquired */
	LRU_ROTATE,		/* item referenced, give another pass */
	LRU_SKIP,		/* item cannot be locked, skip */
	LRU_RETRY,		/* item not freeable; may drop the lock
				   internally, but must return locked */
};

struct list_lru_one {
	struct list_head	list;
	/* may become negative during memcg reparenting */
	long			nr_items;
};

struct list_lru_memcg {
	/* array of per-cgroup lists, indexed by memcg_cache_id */
	struct list_lru_one	*lru[0];
};

struct list_lru_node {
	/* protects all lists on the node, including the per-cgroup ones */
	spinlock_t		lock;
	/* global list, used for the root cgroup in cgroup-aware lrus */
	struct list_lru_one	lru;
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
	/* for cgroup-aware lrus, points to the per-cgroup lists; otherwise NULL */
	struct list_lru_memcg	*memcg_lrus;
#endif
	long			nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
	struct list_lru_node	*node;
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
	struct list_head	list;
#endif
};

void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key);

#define list_lru_init(lru)		__list_lru_init((lru), false, NULL)
#define list_lru_init_key(lru, key)	__list_lru_init((lru), false, (key))
#define list_lru_init_memcg(lru)	__list_lru_init((lru), true, NULL)

int memcg_update_all_list_lrus(int num_memcgs);
void memcg_drain_all_list_lrus(int src_idx, int dst_idx);

/**
 * list_lru_add: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function returns without
 * doing anything. The caller therefore does not need to track whether the
 * element is already on the list and may update it lazily. Note, however,
 * that this holds for *a* list, not *this* list: if elements can be on more
 * than one type of list, it is up to the caller to fully remove the item
 * from the previous list (with list_lru_del(), for instance) before moving
 * it to @lru.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);

/**
 * list_lru_del: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add() in terms of list
 * manipulation. The comments above about an element already belonging to
 * a list also apply to list_lru_del().
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);
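/*
 * Usage sketch (illustrative only, not part of this header): a cache
 * object that lazily parks itself on a list_lru when it goes idle and
 * removes itself when it is reused. "struct demo_obj" and both helpers
 * are hypothetical names. Because list_lru_add() does nothing for an
 * item that is already on a list, the helpers need no bookkeeping of
 * their own.
 */
struct demo_obj {
	struct list_head	lru;		/* linkage owned by the list_lru */
	unsigned long		refcount;
};

/* Called when the last reference is dropped: make @obj reclaimable. */
static inline bool demo_obj_park(struct list_lru *lru, struct demo_obj *obj)
{
	return list_lru_add(lru, &obj->lru);	/* false if already queued */
}

/* Called when @obj is looked up again: take it off the reclaim list. */
static inline bool demo_obj_reuse(struct list_lru *lru, struct demo_obj *obj)
{
	return list_lru_del(lru, &obj->lru);	/* false if it was not queued */
}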
/**
 * list_lru_count_one: return the number of objects currently held by @lru
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 * @memcg: the cgroup to count from.
 *
 * Always returns a non-negative number, 0 for an empty list. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg);
unsigned long list_lru_count_node(struct list_lru *lru, int nid);

static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
						  struct shrink_control *sc)
{
	return list_lru_count_one(lru, sc->nid, sc->memcg);
}

static inline unsigned long list_lru_count(struct list_lru *lru)
{
	long count = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY)
		count += list_lru_count_node(lru, nid);

	return count;
}

void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head);

typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
		struct list_lru_one *list, spinlock_t *lock, void *cb_arg);

/**
 * list_lru_walk_one: walk a list_lru, isolating and disposing of freeable items
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do
 * with the item currently being scanned.
 * @cb_arg: opaque argument that will be passed to @isolate.
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan elements in a particular list_lru, passing each of
 * them to the @isolate callback along with the current list spinlock and the
 * caller-provided opaque argument. The @isolate callback may drop the lock
 * internally, but *must* return with the lock held. The callback returns an
 * enum lru_status telling the list_lru infrastructure what to do with the
 * object being scanned.
 *
 * Please note that @nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk_one(struct list_lru *lru,
				int nid, struct mem_cgroup *memcg,
				list_lru_walk_cb isolate, void *cb_arg,
				unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk);

static inline unsigned long
list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
		     list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
				 &sc->nr_to_scan);
}

static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
	      void *cb_arg, unsigned long nr_to_walk)
{
	long isolated = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		isolated += list_lru_walk_node(lru, nid, isolate,
					       cb_arg, &nr_to_walk);
		if (nr_to_walk == 0)	/* nr_to_walk is unsigned */
			break;
	}
	return isolated;
}
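/*
 * Usage sketch (illustrative only, not part of this header): a minimal
 * isolate callback and shrinker hooks, loosely modelled on the pattern
 * used by the dentry and inode caches. "demo_lru", "demo_isolate",
 * "demo_shrink_count" and "demo_shrink_scan" are hypothetical names;
 * "struct demo_obj" is the sketch type from the list_lru_add() example
 * above. list_lru_init() and shrinker registration are omitted.
 */
static struct list_lru demo_lru;

static enum lru_status demo_isolate(struct list_head *item,
		struct list_lru_one *list, spinlock_t *lock, void *cb_arg)
{
	struct list_head *freeable = cb_arg;

	/*
	 * A real callback would container_of() back to the owning object
	 * and spin_trylock() its lock, returning LRU_SKIP on failure and
	 * LRU_ROTATE for recently referenced objects. Here every item is
	 * moved to the caller's private @freeable list.
	 */
	list_lru_isolate_move(list, item, freeable);
	return LRU_REMOVED;
}

static unsigned long demo_shrink_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	return list_lru_shrink_count(&demo_lru, sc);
}

static unsigned long demo_shrink_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	LIST_HEAD(freeable);
	unsigned long freed;

	freed = list_lru_shrink_walk(&demo_lru, sc, demo_isolate, &freeable);

	/* Dispose of the isolated objects outside the lru lock. */
	while (!list_empty(&freeable)) {
		struct demo_obj *obj = list_first_entry(&freeable,
							struct demo_obj, lru);
		list_del_init(&obj->lru);
		/* demo_obj_free(obj); -- hypothetical destructor */
	}
	return freed;
}

#endif /* _LRU_LIST_H */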