/*
 * include/linux/sunrpc/cache.h
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <[email protected]>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */

#ifndef _LINUX_SUNRPC_CACHE_H_
#define _LINUX_SUNRPC_CACHE_H_

#include <linux/slab.h>
#include <asm/atomic.h>
#include <linux/proc_fs.h>

/*
 * Each cache requires:
 *  - A 'struct cache_detail' which contains information specific to the cache
 *    for common code to use.
 *  - An item structure that must contain a "struct cache_head"
 *  - A lookup function defined using DefineCacheLookup
 *  - A 'put' function that can release a cache item.  It will only
 *    be called after cache_put has succeeded, so there are guaranteed
 *    to be no references.
 *  - A function to calculate a hash of an item's key.
 *
 * as well as assorted code fragments (e.g. compare keys) and numbers
 * (e.g. hash size, goal_age, etc.).
 *
 * Each cache must be registered so that it can be cleaned regularly.
 * When the cache is unregistered, it is flushed completely.
 *
 * Entries have a ref count and a 'hashed' flag which counts the existence
 * in the hash table.
 * We only expire entries when the refcount is zero.
 * Existence in the cache is counted in the refcount.
 */
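
/*
 * Illustrative sketch (hypothetical names, not part of the original API):
 * a cache item embeds the common header, normally as its first member,
 * and adds its own key and content fields, e.g.
 *
 *	struct mycache {
 *		struct cache_head h;	- common header (member "h" is what
 *					  DefineSimpleCacheLookup expects)
 *		char *key;		- key information, copied by INIT
 *		int content;		- content, copied by UPDATE
 *	};
 */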

/* Every cache item has a common header that is used
 * for expiring and refreshing entries.
 *
 */
struct cache_head {
	struct cache_head	*next;
	time_t		expiry_time;	/* After this time, don't use the data */
	time_t		last_refresh;	/* If CACHE_PENDING, this is when the upcall
					 * was sent, else this is when the update was received
					 */
	atomic_t	refcnt;
	unsigned long	flags;
};
#define	CACHE_VALID	0	/* Entry contains valid data */
#define	CACHE_NEGATIVE	1	/* Negative entry - there is no match for the key */
#define	CACHE_PENDING	2	/* An upcall has been sent but no reply received yet */

#define	CACHE_NEW_EXPIRY 120	/* keep new things pending confirmation for 120 seconds */
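
/*
 * Note that CACHE_VALID, CACHE_NEGATIVE and CACHE_PENDING are bit numbers
 * for the 'flags' word above, not masks, so they are used with the atomic
 * bit operations.  Illustrative check (hypothetical variable 'h'):
 *
 *	if (test_bit(CACHE_VALID, &h->flags) &&
 *	    !test_bit(CACHE_NEGATIVE, &h->flags))
 *		... the entry holds usable, positive data ...
 */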

struct cache_detail {
	struct module		*owner;
	int			hash_size;
	struct cache_head	**hash_table;
	rwlock_t		hash_lock;

	atomic_t		inuse; /* active user-space update or lookup */

	char			*name;
	void			(*cache_put)(struct cache_head *,
					     struct cache_detail *);

	void			(*cache_request)(struct cache_detail *cd,
						 struct cache_head *h,
						 char **bpp, int *blen);
	int			(*cache_parse)(struct cache_detail *,
					       char *buf, int len);

	int			(*cache_show)(struct seq_file *m,
					      struct cache_detail *cd,
					      struct cache_head *h);

	/* fields below this comment are for internal use
	 * and should not be touched by cache owners
	 */
	time_t			flush_time;		/* flush all cache items with last_refresh
							 * earlier than this */
	struct list_head	others;
	time_t			nextcheck;
	int			entries;

	/* fields for communication over channel */
	struct list_head	queue;
	struct proc_dir_entry	*proc_ent;
	struct proc_dir_entry	*flush_ent, *channel_ent, *content_ent;

	atomic_t		readers;		/* how many times is /channel open */
	time_t			last_close;		/* if there are no readers, when did the last close happen */
	time_t			last_warn;		/* when we last warned about having no readers */
	void			(*warn_no_listener)(struct cache_detail *cd);
};
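
/*
 * Illustrative sketch (hypothetical names): a cache owner usually provides
 * a hash table and a cache_detail wired to its callbacks, and then
 * registers it so it gets cleaned regularly:
 *
 *	static struct cache_head *mycache_table[MYCACHE_HASHMAX];
 *
 *	static struct cache_detail mycache_cache = {
 *		.hash_size	= MYCACHE_HASHMAX,
 *		.hash_table	= mycache_table,
 *		.name		= "mycache",
 *		.cache_put	= mycache_put,
 *		.cache_request	= mycache_request,
 *		.cache_parse	= mycache_parse,
 *		.cache_show	= mycache_show,
 *	};
 *
 *	cache_register(&mycache_cache);
 */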


/* this must be embedded in any request structure that
 * identifies an object that will want a callback on
 * a cache fill
 */
struct cache_req {
	struct cache_deferred_req *(*defer)(struct cache_req *req);
};

/* this must be embedded in a deferred_request that is being
 * delayed awaiting a cache fill
 */
struct cache_deferred_req {
	struct list_head	hash;	/* on hash chain */
	struct list_head	recent;	/* on fifo */
	struct cache_head	*item;	/* cache item we wait on */
	time_t			recv_time;
	void			*owner;	/* we might need to discard all deferred requests
					 * owned by someone */
	void			(*revisit)(struct cache_deferred_req *req,
					   int too_many);
};
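
/*
 * Illustrative sketch (hypothetical names): a request that may have to wait
 * for a cache fill embeds a cache_deferred_req in its deferred form and
 * supplies a 'revisit' handler; the cache_req 'defer' method allocates the
 * structure and returns a pointer to the embedded handle:
 *
 *	struct mydeferred_req {
 *		struct cache_deferred_req handle;
 *		... enough state to replay the original request ...
 *	};
 *
 *	static void mydeferred_revisit(struct cache_deferred_req *dreq,
 *				       int too_many)
 *	{
 *		struct mydeferred_req *dr =
 *			container_of(dreq, struct mydeferred_req, handle);
 *		... requeue the saved request, or drop it if too_many ...
 *	}
 */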

/*
 * Just like a template in C++, this macro does cache lookups
 * for us.
 * The function is passed some sort of HANDLE from which a cache_detail
 * structure can be determined (via SETUP, DETAIL), a template
 * cache entry (type RTN*), and a "set" flag.  Using the HASHFN and the
 * TEST, the function will try to find a matching cache entry in the cache.
 * If "set" == 0 :
 *    If an entry is found, it is returned
 *    If no entry is found, a new non-VALID entry is created.
 * If "set" == 1 and INPLACE == 0 :
 *    If no entry is found, a new one is inserted with data from "template"
 *    If a non-CACHE_VALID entry is found, it is updated from "template" using UPDATE
 *    If a CACHE_VALID entry is found, a new entry is swapped in with data
 *       from "template"
 * If "set" == 1 and INPLACE == 1 :
 *    As above, except that if a CACHE_VALID entry is found, we UPDATE in place
 *       instead of swapping in a new entry.
 *
 * If the passed item has the CACHE_NEGATIVE flag set, then UPDATE is not
 * run but instead CACHE_NEGATIVE is set in any new item.
 *
 * In any case, the new entry is returned with a reference count.
 *
 *
 * RTN is a struct type for a cache entry
 * MEMBER is the member of the cache entry which is the cache_head, and it must be first
 * FNAME is the name for the function
 * ARGS are the arguments to the function and must contain RTN *item, int set.  They may
 *   also contain something to be used by SETUP or DETAIL to find the cache_detail.
 * SETUP  locates the cache detail and makes it available as...
 * DETAIL identifies the cache detail, possibly set up by SETUP
 * HASHFN returns a hash value of the cache entry "item"
 * TEST  tests if "tmp" matches "item"
 * INIT copies key information from "item" to "new"
 * UPDATE copies content information from "item" to "tmp"
 * INPLACE is true if updates can happen in place rather than allocating a new structure
 *
 * WARNING: any substantial changes to this must be reflected in
 *   net/sunrpc/svcauth.c (auth_domain_lookup),
 *   which is a similar routine that is open-coded.
 */
#define DefineCacheLookup(RTN,MEMBER,FNAME,ARGS,SETUP,DETAIL,HASHFN,TEST,INIT,UPDATE,INPLACE)	\
RTN *FNAME ARGS										\
{											\
	RTN *tmp, *new = NULL;								\
	struct cache_head **hp, **head;							\
	SETUP;										\
	head = &(DETAIL)->hash_table[HASHFN];						\
 retry:											\
	if (set || new) write_lock(&(DETAIL)->hash_lock);				\
	else read_lock(&(DETAIL)->hash_lock);						\
	for (hp = head; *hp != NULL; hp = &tmp->MEMBER.next) {				\
		tmp = container_of(*hp, RTN, MEMBER);					\
		if (TEST) { /* found a match */						\
											\
			if (set && !INPLACE && test_bit(CACHE_VALID, &tmp->MEMBER.flags) && !new) \
				break;							\
											\
			if (new)							\
				{INIT;}							\
			if (set) {							\
				if (!INPLACE && test_bit(CACHE_VALID, &tmp->MEMBER.flags))\
				{ /* need to swap in new */				\
					RTN *t2;					\
											\
					new->MEMBER.next = tmp->MEMBER.next;		\
					*hp = &new->MEMBER;				\
					tmp->MEMBER.next = NULL;			\
					t2 = tmp; tmp = new; new = t2;			\
				}							\
				if (test_bit(CACHE_NEGATIVE, &item->MEMBER.flags))	\
					set_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags);	\
				else {							\
					UPDATE;						\
					clear_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags);	\
				}							\
			}								\
			cache_get(&tmp->MEMBER);					\
			if (set || new) write_unlock(&(DETAIL)->hash_lock);		\
			else read_unlock(&(DETAIL)->hash_lock);				\
			if (set)							\
				cache_fresh(DETAIL, &tmp->MEMBER, item->MEMBER.expiry_time); \
			if (set && !INPLACE && new) cache_fresh(DETAIL, &new->MEMBER, 0); \
			if (new) (DETAIL)->cache_put(&new->MEMBER, DETAIL);		\
			return tmp;							\
		}									\
	}										\
	/* Didn't find anything */							\
	if (new) {									\
		INIT;									\
		new->MEMBER.next = *head;						\
		*head = &new->MEMBER;							\
		(DETAIL)->entries++;							\
		cache_get(&new->MEMBER);						\
		if (set) {								\
			tmp = new;							\
			if (test_bit(CACHE_NEGATIVE, &item->MEMBER.flags))		\
				set_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags);		\
			else {UPDATE;}							\
		}									\
	}										\
	if (set || new) write_unlock(&(DETAIL)->hash_lock);				\
	else read_unlock(&(DETAIL)->hash_lock);						\
	if (new && set)									\
		cache_fresh(DETAIL, &new->MEMBER, item->MEMBER.expiry_time);		\
	if (new)									\
		return new;								\
	new = kmalloc(sizeof(*new), GFP_KERNEL);					\
	if (new) {									\
		cache_init(&new->MEMBER);						\
		goto retry;								\
	}										\
	return NULL;									\
}

#define DefineSimpleCacheLookup(STRUCT, INPLACE)	\
	DefineCacheLookup(struct STRUCT, h, STRUCT##_lookup, (struct STRUCT *item, int set), /* no setup */,	\
			  &STRUCT##_cache, STRUCT##_hash(item), STRUCT##_match(item, tmp),	\
			  STRUCT##_init(new, item), STRUCT##_update(tmp, item), INPLACE)
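
/*
 * Illustrative sketch (hypothetical names): DefineSimpleCacheLookup expects
 * the cache_head member to be called "h", a cache_detail called
 * STRUCT_cache, and key/content fragments named after the struct:
 *
 *	#define mycache_hash(item)	(some hash of (item)->key)
 *	#define mycache_match(a, b)	(strcmp((a)->key, (b)->key) == 0)
 *	#define mycache_init(new, item)	((new)->key = (item)->key)
 *	#define mycache_update(tmp, item)	((tmp)->content = (item)->content)
 *
 *	static DefineSimpleCacheLookup(mycache, 0)	- defines mycache_lookup()
 *
 * (A real INIT fragment usually copies or takes its own reference on the
 * key storage.)  A lookup is then made with a template entry:
 *
 *	struct mycache tmpl = { .key = name };
 *	struct mycache *found = mycache_lookup(&tmpl, 0);
 */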

#define cache_for_each(pos, detail, index, member)						\
	for (({read_lock(&(detail)->hash_lock); index = (detail)->hash_size;});		\
	     ({if (index == 0) read_unlock(&(detail)->hash_lock); index--;});			\
		)										\
		for (pos = container_of((detail)->hash_table[index], typeof(*pos), member);	\
		     &pos->member;								\
		     pos = container_of(pos->member.next, typeof(*pos), member))
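
/*
 * Illustrative use (hypothetical names): cache_for_each walks every entry
 * of a cache under the hash lock, e.g. when dumping its contents:
 *
 *	struct mycache *pos;
 *	int i;
 *
 *	cache_for_each(pos, &mycache_cache, i, h) {
 *		... look at pos->key, pos->content ...
 *	}
 */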



extern void cache_clean_deferred(void *owner);

static inline struct cache_head *cache_get(struct cache_head *h)
{
	atomic_inc(&h->refcnt);
	return h;
}


static inline int cache_put(struct cache_head *h, struct cache_detail *cd)
{
	if (atomic_read(&h->refcnt) <= 2 &&
	    h->expiry_time < cd->nextcheck)
		cd->nextcheck = h->expiry_time;
	return atomic_dec_and_test(&h->refcnt);
}
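
/*
 * Illustrative sketch (hypothetical names): an owner's cache_put callback
 * normally drops the common reference with cache_put() above and, when
 * that was the last reference, frees the key storage and the item:
 *
 *	static void mycache_put(struct cache_head *item, struct cache_detail *cd)
 *	{
 *		struct mycache *m = container_of(item, struct mycache, h);
 *
 *		if (cache_put(item, cd)) {
 *			kfree(m->key);
 *			kfree(m);
 *		}
 *	}
 */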

extern void cache_init(struct cache_head *h);
extern void cache_fresh(struct cache_detail *detail,
			struct cache_head *head, time_t expiry);
extern int cache_check(struct cache_detail *detail,
		       struct cache_head *h, struct cache_req *rqstp);
extern void cache_flush(void);
extern void cache_purge(struct cache_detail *detail);
#define NEVER (0x7FFFFFFF)
extern void cache_register(struct cache_detail *cd);
extern int cache_unregister(struct cache_detail *cd);

extern void qword_add(char **bpp, int *lp, char *str);
extern void qword_addhex(char **bpp, int *lp, char *buf, int blen);
extern int qword_get(char **bpp, char *dest, int bufsize);
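
/*
 * Illustrative sketch (hypothetical names): a cache_request callback uses
 * qword_add/qword_addhex to write the key as quoted, space-separated words
 * into the channel buffer, and terminates the line with a newline:
 *
 *	static void mycache_request(struct cache_detail *cd, struct cache_head *h,
 *				    char **bpp, int *blen)
 *	{
 *		struct mycache *m = container_of(h, struct mycache, h);
 *
 *		qword_add(bpp, blen, m->key);
 *		(*bpp)[-1] = '\n';
 *	}
 */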

static inline int get_int(char **bpp, int *anint)
{
	char buf[50];
	char *ep;
	int rv;
	int len = qword_get(bpp, buf, 50);
	if (len < 0) return -EINVAL;
	if (len == 0) return -ENOENT;
	rv = simple_strtol(buf, &ep, 0);
	if (*ep) return -EINVAL;
	*anint = rv;
	return 0;
}

static inline time_t get_expiry(char **bpp)
{
	int rv;
	if (get_int(bpp, &rv))
		return 0;
	if (rv < 0)
		return 0;
	return rv;
}
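
/*
 * Illustrative sketch (hypothetical names): a cache_parse callback reads the
 * words written by user space back with qword_get/get_int/get_expiry, builds
 * a template item and performs a "set" lookup to update the cache:
 *
 *	static int mycache_parse(struct cache_detail *cd, char *mesg, int mlen)
 *	{
 *		char key[100];
 *		struct mycache tmpl, *found;
 *
 *		if (qword_get(&mesg, key, sizeof(key)) <= 0)
 *			return -EINVAL;
 *		tmpl.key = key;
 *		tmpl.h.flags = 0;
 *		tmpl.h.expiry_time = get_expiry(&mesg);
 *		if (tmpl.h.expiry_time == 0)
 *			return -EINVAL;
 *		if (get_int(&mesg, &tmpl.content))
 *			set_bit(CACHE_NEGATIVE, &tmpl.h.flags);
 *		found = mycache_lookup(&tmpl, 1);
 *		if (found)
 *			mycache_put(&found->h, cd);
 *		return 0;
 *	}
 */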

#endif /*  _LINUX_SUNRPC_CACHE_H_ */