#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/mmdebug.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>
#include <linux/init.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif

/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	preempt_disable();				\
	&__get_cpu_var(var); }))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var) do {				\
	(void)&(var);					\
	preempt_enable();				\
} while (0)

#define get_cpu_ptr(var) ({				\
	preempt_disable();				\
	this_cpu_ptr(var); })

#define put_cpu_ptr(var) do {				\
	(void)(var);					\
	preempt_enable();				\
} while (0)
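
/*
 * Illustrative usage sketch (not part of this header; 'my_counter' is
 * a hypothetical DEFINE_PER_CPU(int, my_counter) variable).  The
 * get/put pairs bracket a preemption-disabled section around access
 * to the current CPU's instance:
 *
 *	get_cpu_var(my_counter)++;
 *	put_cpu_var(my_counter);
 *
 * get_cpu_ptr()/put_cpu_ptr() play the same role for a dynamically
 * allocated __percpu pointer.
 */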

/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)

/*
 * The percpu allocator can serve percpu allocations before slab is
 * initialized, which allows slab to depend on the percpu allocator.
 * The following two parameters decide how much to preallocate for
 * this.  Keep PERCPU_DYNAMIC_RESERVE equal to or larger than
 * PERCPU_DYNAMIC_EARLY_SIZE.
 */
#define PERCPU_DYNAMIC_EARLY_SLOTS	128
#define PERCPU_DYNAMIC_EARLY_SIZE	(12 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if the arch
 * is manually allocating and mapping it for faster access (as a part
 * of large page mapping for example).
 *
 * The following values give between one and two pages of free space
 * after typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and x86_32.  A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif

extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
	int			nr_units;	/* aligned # of units */
	unsigned long		base_offset;	/* base address offset */
	unsigned int		*cpu_map;	/* unit->cpu map, empty
						 * entries contain NR_CPUS */
};

struct pcpu_alloc_info {
	size_t			static_size;
	size_t			reserved_size;
	size_t			dyn_size;
	size_t			unit_size;
	size_t			atom_size;
	size_t			alloc_size;
	size_t			__ai_size;	/* internal, don't use */
	int			nr_groups;	/* 0 if grouping unnecessary */
	struct pcpu_group_info	groups[];
};

enum pcpu_fc {
	PCPU_FC_AUTO,
	PCPU_FC_EMBED,
	PCPU_FC_PAGE,

	PCPU_FC_NR,
};
extern const char * const pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;

typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
				     size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
							     int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
					 void *base_addr);

#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn);
#endif

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern int __init pcpu_page_first_chunk(size_t reserved_size,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif

/*
 * Use this to get at a given cpu's dynamically allocated copy of a
 * per-cpu object. Non-atomic access to the current CPU's copy should
 * probably be combined with get_cpu()/put_cpu().
 */
#ifdef CONFIG_SMP
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
#else
#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
#endif
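
/*
 * Illustrative sketch (assumed names): summing a dynamically
 * allocated percpu counter over all possible CPUs, where 'counters'
 * is a hypothetical result of alloc_percpu(unsigned long):
 *
 *	unsigned long sum = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		sum += *per_cpu_ptr(counters, cpu);
 */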

extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern bool is_kernel_percpu_address(unsigned long addr);

#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
extern void __init setup_per_cpu_areas(void);
#endif
extern void __init percpu_init_late(void);

extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

#define alloc_percpu(type)	\
	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
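
/*
 * Illustrative sketch (hypothetical 'struct foo' and error path) of
 * the typical lifetime of a dynamic percpu allocation:
 *
 *	struct foo __percpu *p = alloc_percpu(struct foo);
 *
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	free_percpu(p);
 */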

/*
 * Branching macros that split an operation into a set of functions,
 * one of which is called depending on the scalar size of the object
 * handled.
 */

extern void __bad_size_call_parameter(void);

#define __pcpu_size_call_return(stem, variable)				\
({	typeof(variable) pscr_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable);break;			\
	case 2: pscr_ret__ = stem##2(variable);break;			\
	case 4: pscr_ret__ = stem##4(variable);break;			\
	case 8: pscr_ret__ = stem##8(variable);break;			\
	default:							\
		__bad_size_call_parameter();break;			\
	}								\
	pscr_ret__;							\
})
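
/*
 * For example (illustrative, assuming a 4-byte int): with
 * DEFINE_PER_CPU(int, x), this_cpu_read(x) goes through
 * __pcpu_size_call_return(this_cpu_read_, x) and, since
 * sizeof(x) == 4, ends up as this_cpu_read_4(x).  Unsupported sizes
 * fall through to __bad_size_call_parameter(), which is declared but
 * never defined, so the mistake shows up as a link-time error.
 */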

#define __pcpu_size_call_return2(stem, variable, ...)			\
({									\
	typeof(variable) pscr2_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})

/*
 * Special handling for cmpxchg_double.  cmpxchg_double is passed two
 * percpu variables.  The first has to be aligned to a double word
 * boundary and the second has to follow directly thereafter.
 * We enforce this on all architectures even if they don't support
 * a double cmpxchg instruction, since it's a cheap requirement, and it
 * avoids breaking the requirement for architectures with the instruction.
 */
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
({									\
	bool pdcrb_ret__;						\
	__verify_pcpu_ptr(&pcp1);					\
	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
	VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1)));		\
	VM_BUG_ON((unsigned long)(&pcp2) !=				\
		  (unsigned long)(&pcp1) + sizeof(pcp1));		\
	switch(sizeof(pcp1)) {						\
	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pdcrb_ret__;							\
})

#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
		case 1: stem##1(variable, __VA_ARGS__);break;		\
		case 2: stem##2(variable, __VA_ARGS__);break;		\
		case 4: stem##4(variable, __VA_ARGS__);break;		\
		case 8: stem##8(variable, __VA_ARGS__);break;		\
		default:						\
			__bad_size_call_parameter();break;		\
	}								\
} while (0)

/*
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access for other operations
 * on the *same* processor. The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The first group is used for accesses that must themselves be done
 * in a preemption safe way, since the calling context is not known to
 * be preempt safe. Interrupts may occur. If an interrupt modifies the
 * variable too then RMW actions will not be reliable.
 *
 * The arch code can provide optimized functions in two ways:
 *
 * 1. Override the function completely, e.g. define this_cpu_add().
 *    The arch must then ensure that the various scalar formats passed
 *    are handled correctly.
 *
 * 2. Provide functions for certain scalar sizes, e.g. provide
 *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
 *    sized RMW actions. If arch code does not provide operations for
 *    a scalar size then the fallback in the generic code will be
 *    used.
 */

#define _this_cpu_generic_read(pcp)					\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = *this_cpu_ptr(&(pcp));					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_read
# ifndef this_cpu_read_1
#  define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_2
#  define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_4
#  define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_8
#  define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
# endif
# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
#endif

#define _this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	*__this_cpu_ptr(&(pcp)) op val;					\
	raw_local_irq_restore(flags);					\
} while (0)

#ifndef this_cpu_write
# ifndef this_cpu_write_1
#  define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_2
#  define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_4
#  define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_8
#  define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
#endif
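
/*
 * Illustrative sketch (hypothetical per-cpu variable 'state'):
 *
 *	DEFINE_PER_CPU(int, state);
 *
 *	this_cpu_write(state, 1);
 *	if (this_cpu_read(state) == 1)
 *		...
 *
 * No explicit preempt_disable() is needed around either call; the
 * generic fallbacks above take care of that themselves.
 */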

#ifndef this_cpu_add
# ifndef this_cpu_add_1
#  define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_2
#  define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_4
#  define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_8
#  define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, (pcp), (val))
#endif

#ifndef this_cpu_sub
# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(typeof(pcp))(val))
#endif

#ifndef this_cpu_inc
# define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
#endif

#ifndef this_cpu_dec
# define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)
#endif
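
/*
 * Illustrative sketch (hypothetical event counter): lockless per-cpu
 * statistics from any context,
 *
 *	DEFINE_PER_CPU(unsigned long, nr_events);
 *
 *	this_cpu_inc(nr_events);
 *	this_cpu_add(nr_events, batch);
 *
 * where 'batch' is an assumed local variable.
 */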

#ifndef this_cpu_and
# ifndef this_cpu_and_1
#  define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_2
#  define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_4
#  define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_8
#  define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, (pcp), (val))
#endif

#ifndef this_cpu_or
# ifndef this_cpu_or_1
#  define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_2
#  define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_4
#  define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_8
#  define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
#endif

#define _this_cpu_generic_add_return(pcp, val)				\
({									\
	typeof(pcp) ret__;						\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	__this_cpu_add(pcp, val);					\
	ret__ = __this_cpu_read(pcp);					\
	raw_local_irq_restore(flags);					\
	ret__;								\
})

#ifndef this_cpu_add_return
# ifndef this_cpu_add_return_1
#  define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_2
#  define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_4
#  define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_8
#  define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#endif

#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
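
/*
 * Illustrative sketch: this_cpu_inc_return() increments the current
 * CPU's instance and yields the new value in one safe step, e.g. for
 * a hypothetical per-cpu nesting counter,
 *
 *	DEFINE_PER_CPU(int, nesting);
 *
 *	if (this_cpu_inc_return(nesting) == 1)
 *		... first entry on this CPU ...
 */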

#define _this_cpu_generic_xchg(pcp, nval)				\
({	typeof(pcp) ret__;						\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	ret__ = __this_cpu_read(pcp);					\
	__this_cpu_write(pcp, nval);					\
	raw_local_irq_restore(flags);					\
	ret__;								\
})

#ifndef this_cpu_xchg
# ifndef this_cpu_xchg_1
#  define this_cpu_xchg_1(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_2
#  define this_cpu_xchg_2(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_4
#  define this_cpu_xchg_4(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_8
#  define this_cpu_xchg_8(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# define this_cpu_xchg(pcp, nval)	\
	__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
#endif

#define _this_cpu_generic_cmpxchg(pcp, oval, nval)			\
({									\
	typeof(pcp) ret__;						\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	ret__ = __this_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		__this_cpu_write(pcp, nval);				\
	raw_local_irq_restore(flags);					\
	ret__;								\
})

#ifndef this_cpu_cmpxchg
# ifndef this_cpu_cmpxchg_1
#  define this_cpu_cmpxchg_1(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_2
#  define this_cpu_cmpxchg_2(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_4
#  define this_cpu_cmpxchg_4(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_8
#  define this_cpu_cmpxchg_8(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define this_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
#endif
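
/*
 * Illustrative sketch (hypothetical per-cpu high-water mark): the
 * usual compare-and-swap retry loop built on this_cpu_cmpxchg(),
 * where 'hiwater' is DEFINE_PER_CPU(unsigned long, hiwater) and
 * 'val' is the candidate value:
 *
 *	unsigned long old;
 *
 *	do {
 *		old = this_cpu_read(hiwater);
 *		if (val <= old)
 *			break;
 *	} while (this_cpu_cmpxchg(hiwater, old, val) != old);
 */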

/*
 * cmpxchg_double replaces two adjacent scalars at once.  The first
 * two parameters are per cpu variables which have to be of the same
 * size.  A truth value is returned to indicate success or failure
 * (since a double register result is difficult to handle).  There is
 * very limited hardware support for these operations, so only certain
 * sizes may work.
 */
#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int ret__;							\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
			oval1, oval2, nval1, nval2);			\
	raw_local_irq_restore(flags);					\
	ret__;								\
})

#ifndef this_cpu_cmpxchg_double
# ifndef this_cpu_cmpxchg_double_1
#  define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_2
#  define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_4
#  define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_8
#  define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif
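
/*
 * Illustrative sketch: the two per-cpu words must be the same size
 * and adjacent, with the first double-word aligned (enforced by
 * __pcpu_double_call_return_bool() above).  With a hypothetical
 * suitably aligned per-cpu pair,
 *
 *	if (this_cpu_cmpxchg_double(p.ptr, p.seq,
 *				    old_ptr, old_seq,
 *				    new_ptr, new_seq))
 *		... both words were replaced in one atomic step ...
 *
 * where 'p' is an assumed DEFINE_PER_CPU_ALIGNED structure whose
 * first two members are 'void *ptr' and 'unsigned long seq'.
 */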

/*
 * Generic percpu operations for contexts that are safe from preemption/interrupts.
 * Either we do not care about races or the caller has the
 * responsibility of handling preemption/interrupt issues. Arch code can still
 * override these instructions since the arch per cpu code may be more
 * efficient and may actually get race freeness for free (that is the
 * case for x86 for example).
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts then one of these RMW operations can show unexpected
 * behavior because the execution thread was rescheduled on another processor
 * or an interrupt occurred and the same percpu variable was modified from
 * the interrupt context.
 */
#ifndef __this_cpu_read
# ifndef __this_cpu_read_1
#  define __this_cpu_read_1(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_2
#  define __this_cpu_read_2(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_4
#  define __this_cpu_read_4(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_8
#  define __this_cpu_read_8(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# define __this_cpu_read(pcp)	__pcpu_size_call_return(__this_cpu_read_, (pcp))
#endif

#define __this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	*__this_cpu_ptr(&(pcp)) op val;					\
} while (0)

#ifndef __this_cpu_write
# ifndef __this_cpu_write_1
#  define __this_cpu_write_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_2
#  define __this_cpu_write_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_4
#  define __this_cpu_write_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_8
#  define __this_cpu_write_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# define __this_cpu_write(pcp, val)	__pcpu_size_call(__this_cpu_write_, (pcp), (val))
#endif

#ifndef __this_cpu_add
# ifndef __this_cpu_add_1
#  define __this_cpu_add_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_2
#  define __this_cpu_add_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_4
#  define __this_cpu_add_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_8
#  define __this_cpu_add_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define __this_cpu_add(pcp, val)	__pcpu_size_call(__this_cpu_add_, (pcp), (val))
#endif

#ifndef __this_cpu_sub
# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(typeof(pcp))(val))
#endif

#ifndef __this_cpu_inc
# define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
#endif

#ifndef __this_cpu_dec
# define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)
#endif

#ifndef __this_cpu_and
# ifndef __this_cpu_and_1
#  define __this_cpu_and_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_2
#  define __this_cpu_and_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_4
#  define __this_cpu_and_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_8
#  define __this_cpu_and_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define __this_cpu_and(pcp, val)	__pcpu_size_call(__this_cpu_and_, (pcp), (val))
#endif

#ifndef __this_cpu_or
# ifndef __this_cpu_or_1
#  define __this_cpu_or_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_2
#  define __this_cpu_or_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_4
#  define __this_cpu_or_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_8
#  define __this_cpu_or_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define __this_cpu_or(pcp, val)	__pcpu_size_call(__this_cpu_or_, (pcp), (val))
#endif

#define __this_cpu_generic_add_return(pcp, val)				\
({									\
	__this_cpu_add(pcp, val);					\
	__this_cpu_read(pcp);						\
})

#ifndef __this_cpu_add_return
# ifndef __this_cpu_add_return_1
#  define __this_cpu_add_return_1(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_2
#  define __this_cpu_add_return_2(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_4
#  define __this_cpu_add_return_4(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_8
#  define __this_cpu_add_return_8(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# define __this_cpu_add_return(pcp, val)	\
	__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
#endif

#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
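
/*
 * Illustrative sketch: the __this_cpu_*() forms rely on the caller
 * for protection, e.g. for a hypothetical counter not touched from
 * interrupt context:
 *
 *	preempt_disable();
 *	__this_cpu_inc(nr_events);
 *	preempt_enable();
 *
 * If the variable were also modified from interrupts, an irq-disabled
 * section (or the this_cpu_*() variants above) would be needed.
 */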

#define __this_cpu_generic_xchg(pcp, nval)				\
({	typeof(pcp) ret__;						\
	ret__ = __this_cpu_read(pcp);					\
	__this_cpu_write(pcp, nval);					\
	ret__;								\
})

#ifndef __this_cpu_xchg
# ifndef __this_cpu_xchg_1
#  define __this_cpu_xchg_1(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_2
#  define __this_cpu_xchg_2(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_4
#  define __this_cpu_xchg_4(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_8
#  define __this_cpu_xchg_8(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# define __this_cpu_xchg(pcp, nval)	\
	__pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
#endif

#define __this_cpu_generic_cmpxchg(pcp, oval, nval)			\
({									\
	typeof(pcp) ret__;						\
	ret__ = __this_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		__this_cpu_write(pcp, nval);				\
	ret__;								\
})

#ifndef __this_cpu_cmpxchg
# ifndef __this_cpu_cmpxchg_1
#  define __this_cpu_cmpxchg_1(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_2
#  define __this_cpu_cmpxchg_2(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_4
#  define __this_cpu_cmpxchg_4(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_8
#  define __this_cpu_cmpxchg_8(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define __this_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
#endif

#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int __ret = 0;							\
	if (__this_cpu_read(pcp1) == (oval1) &&				\
			 __this_cpu_read(pcp2) == (oval2)) {		\
		__this_cpu_write(pcp1, (nval1));			\
		__this_cpu_write(pcp2, (nval2));			\
		__ret = 1;						\
	}								\
	(__ret);							\
})

#ifndef __this_cpu_cmpxchg_double
# ifndef __this_cpu_cmpxchg_double_1
#  define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_2
#  define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_4
#  define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_8
#  define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif

#endif /* __LINUX_PERCPU_H */