#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>
#include <linux/init.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif

/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	preempt_disable();				\
	&__get_cpu_var(var); }))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var) do {				\
	(void)&(var);					\
	preempt_enable();				\
} while (0)

#define get_cpu_ptr(var) ({				\
	preempt_disable();				\
	this_cpu_ptr(var); })

#define put_cpu_ptr(var) do {				\
	(void)(var);					\
	preempt_enable();				\
} while (0)

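/*
 * Example (illustrative sketch, not part of the API above; "pkt_count"
 * and "struct pkt_stats" are hypothetical):
 *
 *	static DEFINE_PER_CPU(unsigned long, pkt_count);
 *
 *	get_cpu_var(pkt_count)++;	(preemption is disabled here)
 *	put_cpu_var(pkt_count);		(preemption is enabled again)
 *
 * and, for a dynamically allocated per-cpu object:
 *
 *	struct pkt_stats __percpu *stats = alloc_percpu(struct pkt_stats);
 *	struct pkt_stats *s;
 *
 *	s = get_cpu_ptr(stats);		(pointer into this CPU's copy)
 *	s->rx_packets++;
 *	put_cpu_ptr(stats);
 */
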
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)

/*
 * The percpu allocator can serve percpu allocations before slab is
 * initialized, which allows slab to depend on the percpu allocator.
 * The following two parameters decide how much to preallocate for
 * this.  Keep PERCPU_DYNAMIC_RESERVE equal to or larger than
 * PERCPU_DYNAMIC_EARLY_SIZE.
 */
#define PERCPU_DYNAMIC_EARLY_SLOTS	128
#define PERCPU_DYNAMIC_EARLY_SIZE	(12 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if the arch is
 * manually allocating and mapping it for faster access (as part of a
 * large page mapping, for example).
 *
 * The following values leave between one and two pages of free space
 * after a typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on 64-bit and 32-bit x86.  A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif

extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
	int			nr_units;	/* aligned # of units */
	unsigned long		base_offset;	/* base address offset */
	unsigned int		*cpu_map;	/* unit->cpu map, empty
						 * entries contain NR_CPUS */
};

struct pcpu_alloc_info {
	size_t			static_size;
	size_t			reserved_size;
	size_t			dyn_size;
	size_t			unit_size;
	size_t			atom_size;
	size_t			alloc_size;
	size_t			__ai_size;	/* internal, don't use */
	int			nr_groups;	/* 0 if grouping unnecessary */
	struct pcpu_group_info	groups[];
};

enum pcpu_fc {
	PCPU_FC_AUTO,
	PCPU_FC_EMBED,
	PCPU_FC_PAGE,

	PCPU_FC_NR,
};
extern const char * const pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;

typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
				     size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
							     int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
					 void *base_addr);

#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn);
#endif

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern int __init pcpu_page_first_chunk(size_t reserved_size,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif

/*
 * Use this to get at a given CPU's instance of a dynamically allocated
 * per-cpu object.  Non-atomic access to the current CPU's instance
 * should probably be combined with get_cpu()/put_cpu().
 */
#ifdef CONFIG_SMP
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
#else
#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
#endif

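/*
 * Example (illustrative; "counters" is a hypothetical dynamically
 * allocated per-cpu counter): fold every CPU's instance into one total.
 *
 *	unsigned long __percpu *counters = alloc_percpu(unsigned long);
 *	unsigned long sum = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		sum += *per_cpu_ptr(counters, cpu);
 */
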
extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern bool is_kernel_percpu_address(unsigned long addr);

#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
extern void __init setup_per_cpu_areas(void);
#endif
extern void __init percpu_init_late(void);

extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

#define alloc_percpu(type)	\
	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))

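/*
 * Example (illustrative; "struct netdev_stats" is hypothetical):
 * alloc_percpu() is typed -- it returns typeof(type) __percpu *, sized
 * and aligned for the requested type.
 *
 *	struct netdev_stats __percpu *stats;
 *
 *	stats = alloc_percpu(struct netdev_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *	...
 *	free_percpu(stats);
 */
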
/*
 * Branching function to split up a function into a set of functions that
 * are called for different scalar sizes of the objects handled.
 */

extern void __bad_size_call_parameter(void);

#define __pcpu_size_call_return(stem, variable)				\
({	typeof(variable) pscr_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable); break;			\
	case 2: pscr_ret__ = stem##2(variable); break;			\
	case 4: pscr_ret__ = stem##4(variable); break;			\
	case 8: pscr_ret__ = stem##8(variable); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr_ret__;							\
})

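/*
 * Dispatch example (illustrative; "tag" is a hypothetical per-cpu u16):
 *
 *	static DEFINE_PER_CPU(u16, tag);
 *
 *	u16 t = this_cpu_read(tag);
 *
 * sizeof(tag) == 2, so the switch above collapses to this_cpu_read_2(tag).
 * Any other size (a 3-byte struct, say) falls into the default case and
 * references __bad_size_call_parameter(), which is declared but never
 * defined, so the build fails at link time.
 */
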
#define __pcpu_size_call_return2(stem, variable, ...)			\
({									\
	typeof(variable) pscr2_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})

/*
 * Special handling for cmpxchg_double.  cmpxchg_double is passed two
 * percpu variables.  The first has to be aligned to a double word
 * boundary and the second has to follow directly thereafter.
 * We enforce this on all architectures even if they don't support
 * a double cmpxchg instruction: it is a cheap requirement to satisfy,
 * and it keeps the layout valid for architectures that do have the
 * instruction.
 */
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
({									\
	bool pdcrb_ret__;						\
	__verify_pcpu_ptr(&pcp1);					\
	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
	VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1)));		\
	VM_BUG_ON((unsigned long)(&pcp2) !=				\
		  (unsigned long)(&pcp1) + sizeof(pcp1));		\
	switch (sizeof(pcp1)) {						\
	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pdcrb_ret__;							\
})

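/*
 * Layout sketch (illustrative; the struct and its members are hypothetical,
 * assuming both members are pointer-sized): keeping the pair adjacent and
 * double-word aligned satisfies the checks above.
 *
 *	struct pcpu_pair {
 *		void		*first;
 *		unsigned long	second;
 *	} __aligned(2 * sizeof(void *));
 *
 *	static DEFINE_PER_CPU(struct pcpu_pair, pair);
 *
 * A call such as this_cpu_cmpxchg_double(pair.first, pair.second, ...)
 * then passes the BUILD_BUG_ON() and VM_BUG_ON() checks.
 */
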
#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
		case 1: stem##1(variable, __VA_ARGS__); break;		\
		case 2: stem##2(variable, __VA_ARGS__); break;		\
		case 4: stem##4(variable, __VA_ARGS__); break;		\
		case 8: stem##8(variable, __VA_ARGS__); break;		\
		default:						\
			__bad_size_call_parameter(); break;		\
	}								\
} while (0)

/*
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access for other operations
 * on the *same* processor.  The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The first group is used for accesses that must be done in a
 * preemption safe way since we know that the context is not preempt
 * safe.  Interrupts may occur.  If the interrupt modifies the variable
 * too then RMW actions will not be reliable.
 *
 * The arch code can provide optimized functions in two ways:
 *
 * 1. Override the function completely.  E.g. define this_cpu_add().
 *    The arch must then ensure that the various scalar formats passed
 *    are handled correctly.
 *
 * 2. Provide functions for certain scalar sizes.  E.g. provide
 *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
 *    sized RMW actions.  If arch code does not provide operations for
 *    a scalar size then the fallback in the generic code will be
 *    used.
 */

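/*
 * Usage example (illustrative; "nr_events" is hypothetical): this_cpu_inc()
 * is a single preemption-safe RMW, so no get_cpu_var()/put_cpu_var()
 * bracketing is needed.
 *
 *	static DEFINE_PER_CPU(unsigned long, nr_events);
 *
 *	void count_event(void)
 *	{
 *		this_cpu_inc(nr_events);
 *	}
 */
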
#define _this_cpu_generic_read(pcp)					\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = *this_cpu_ptr(&(pcp));					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_read
# ifndef this_cpu_read_1
#  define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_2
#  define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_4
#  define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_8
#  define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
# endif
# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
#endif

#define _this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	*__this_cpu_ptr(&(pcp)) op val;					\
	raw_local_irq_restore(flags);					\
} while (0)

#ifndef this_cpu_write
# ifndef this_cpu_write_1
#  define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_2
#  define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_4
#  define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_8
#  define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
#endif

#ifndef this_cpu_add
# ifndef this_cpu_add_1
#  define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_2
#  define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_4
#  define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_8
#  define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, (pcp), (val))
#endif

#ifndef this_cpu_sub
# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(typeof(pcp))(val))
#endif

#ifndef this_cpu_inc
# define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
#endif

#ifndef this_cpu_dec
# define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)
#endif

#ifndef this_cpu_and
# ifndef this_cpu_and_1
#  define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_2
#  define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_4
#  define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_8
#  define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, (pcp), (val))
#endif

#ifndef this_cpu_or
# ifndef this_cpu_or_1
#  define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_2
#  define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_4
#  define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_8
#  define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
#endif

#define _this_cpu_generic_add_return(pcp, val)				\
({									\
	typeof(pcp) ret__;						\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	__this_cpu_add(pcp, val);					\
	ret__ = __this_cpu_read(pcp);					\
	raw_local_irq_restore(flags);					\
	ret__;								\
})

#ifndef this_cpu_add_return
# ifndef this_cpu_add_return_1
#  define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_2
#  define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_4
#  define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_8
#  define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#endif

#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)

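/*
 * Example (illustrative; "msg_seq" is hypothetical): read-modify-write and
 * fetch of the result in one preemption-safe step.
 *
 *	static DEFINE_PER_CPU(int, msg_seq);
 *
 *	int seq = this_cpu_inc_return(msg_seq);
 */
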
#define _this_cpu_generic_xchg(pcp, nval)				\
({	typeof(pcp) ret__;						\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	ret__ = __this_cpu_read(pcp);					\
	__this_cpu_write(pcp, nval);					\
	raw_local_irq_restore(flags);					\
	ret__;								\
})

#ifndef this_cpu_xchg
# ifndef this_cpu_xchg_1
#  define this_cpu_xchg_1(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_2
#  define this_cpu_xchg_2(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_4
#  define this_cpu_xchg_4(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_8
#  define this_cpu_xchg_8(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# define this_cpu_xchg(pcp, nval)	\
	__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
#endif

#define _this_cpu_generic_cmpxchg(pcp, oval, nval)			\
({									\
	typeof(pcp) ret__;						\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	ret__ = __this_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		__this_cpu_write(pcp, nval);				\
	raw_local_irq_restore(flags);					\
	ret__;								\
})

#ifndef this_cpu_cmpxchg
# ifndef this_cpu_cmpxchg_1
#  define this_cpu_cmpxchg_1(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_2
#  define this_cpu_cmpxchg_2(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_4
#  define this_cpu_cmpxchg_4(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_8
#  define this_cpu_cmpxchg_8(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define this_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
#endif

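/*
 * Example (illustrative; "owner" and "my_id" are hypothetical): claim a
 * per-cpu slot only if it is currently free.
 *
 *	static DEFINE_PER_CPU(int, owner);
 *
 *	if (this_cpu_cmpxchg(owner, 0, my_id) == 0)
 *		the slot was free and now belongs to my_id
 */
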
/*
 * cmpxchg_double replaces two adjacent scalars at once.  The first
 * two parameters are per cpu variables which have to be of the same
 * size.  A truth value is returned to indicate success or failure
 * (since a double register result is difficult to handle).  There is
 * very limited hardware support for these operations, so only certain
 * sizes may work.
 */
#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int ret__;							\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
			oval1, oval2, nval1, nval2);			\
	raw_local_irq_restore(flags);					\
	ret__;								\
})

#ifndef this_cpu_cmpxchg_double
# ifndef this_cpu_cmpxchg_double_1
#  define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_2
#  define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_4
#  define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_8
#  define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif

/*
 * Generic percpu operations for contexts that are safe from
 * preemption/interrupts.  Either we do not care about races or the
 * caller has the responsibility of handling preemption/interrupt
 * issues.  Arch code can still override these instructions since the
 * arch per cpu code may be more efficient and may actually get race
 * freeness for free (that is the case for x86 for example).
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts then one of these RMW operations can show
 * unexpected behavior because the execution thread was rescheduled on
 * another processor or an interrupt occurred and the same percpu
 * variable was modified from the interrupt context.
 */
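
/*
 * Example (illustrative; "nr_softirq_events" is hypothetical): inside a
 * section that already runs with interrupts (or preemption) disabled, the
 * cheaper unprotected variants can be used directly.
 *
 *	static DEFINE_PER_CPU(unsigned long, nr_softirq_events);
 *
 *	local_irq_disable();
 *	__this_cpu_inc(nr_softirq_events);
 *	local_irq_enable();
 */
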
#ifndef __this_cpu_read
# ifndef __this_cpu_read_1
#  define __this_cpu_read_1(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_2
#  define __this_cpu_read_2(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_4
#  define __this_cpu_read_4(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_8
#  define __this_cpu_read_8(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# define __this_cpu_read(pcp)	__pcpu_size_call_return(__this_cpu_read_, (pcp))
#endif

#define __this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	*__this_cpu_ptr(&(pcp)) op val;					\
} while (0)

#ifndef __this_cpu_write
# ifndef __this_cpu_write_1
#  define __this_cpu_write_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_2
#  define __this_cpu_write_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_4
#  define __this_cpu_write_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_8
#  define __this_cpu_write_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# define __this_cpu_write(pcp, val)	__pcpu_size_call(__this_cpu_write_, (pcp), (val))
#endif

#ifndef __this_cpu_add
# ifndef __this_cpu_add_1
#  define __this_cpu_add_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_2
#  define __this_cpu_add_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_4
#  define __this_cpu_add_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_8
#  define __this_cpu_add_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define __this_cpu_add(pcp, val)	__pcpu_size_call(__this_cpu_add_, (pcp), (val))
#endif

#ifndef __this_cpu_sub
# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(typeof(pcp))(val))
#endif

#ifndef __this_cpu_inc
# define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
#endif

#ifndef __this_cpu_dec
# define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)
#endif

#ifndef __this_cpu_and
# ifndef __this_cpu_and_1
#  define __this_cpu_and_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_2
#  define __this_cpu_and_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_4
#  define __this_cpu_and_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_8
#  define __this_cpu_and_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define __this_cpu_and(pcp, val)	__pcpu_size_call(__this_cpu_and_, (pcp), (val))
#endif

#ifndef __this_cpu_or
# ifndef __this_cpu_or_1
#  define __this_cpu_or_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_2
#  define __this_cpu_or_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_4
#  define __this_cpu_or_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_8
#  define __this_cpu_or_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define __this_cpu_or(pcp, val)	__pcpu_size_call(__this_cpu_or_, (pcp), (val))
#endif

#define __this_cpu_generic_add_return(pcp, val)				\
({									\
	__this_cpu_add(pcp, val);					\
	__this_cpu_read(pcp);						\
})

#ifndef __this_cpu_add_return
# ifndef __this_cpu_add_return_1
#  define __this_cpu_add_return_1(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_2
#  define __this_cpu_add_return_2(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_4
#  define __this_cpu_add_return_4(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_8
#  define __this_cpu_add_return_8(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# define __this_cpu_add_return(pcp, val)	\
	__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
#endif

#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)

#define __this_cpu_generic_xchg(pcp, nval)				\
({	typeof(pcp) ret__;						\
	ret__ = __this_cpu_read(pcp);					\
	__this_cpu_write(pcp, nval);					\
	ret__;								\
})

#ifndef __this_cpu_xchg
# ifndef __this_cpu_xchg_1
#  define __this_cpu_xchg_1(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_2
#  define __this_cpu_xchg_2(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_4
#  define __this_cpu_xchg_4(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_8
#  define __this_cpu_xchg_8(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# define __this_cpu_xchg(pcp, nval)	\
	__pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
#endif

#define __this_cpu_generic_cmpxchg(pcp, oval, nval)			\
({									\
	typeof(pcp) ret__;						\
	ret__ = __this_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		__this_cpu_write(pcp, nval);				\
	ret__;								\
})

#ifndef __this_cpu_cmpxchg
# ifndef __this_cpu_cmpxchg_1
#  define __this_cpu_cmpxchg_1(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_2
#  define __this_cpu_cmpxchg_2(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_4
#  define __this_cpu_cmpxchg_4(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_8
#  define __this_cpu_cmpxchg_8(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define __this_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
#endif

#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int __ret = 0;							\
	if (__this_cpu_read(pcp1) == (oval1) &&				\
	    __this_cpu_read(pcp2) == (oval2)) {				\
		__this_cpu_write(pcp1, (nval1));			\
		__this_cpu_write(pcp2, (nval2));			\
		__ret = 1;						\
	}								\
	(__ret);							\
})

#ifndef __this_cpu_cmpxchg_double
# ifndef __this_cpu_cmpxchg_double_1
#  define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_2
#  define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_4
#  define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_8
#  define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif

#endif /* __LINUX_PERCPU_H */