/* xref: /linux-6.15/include/linux/percpu.h (revision dec102aa) */
#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/mmdebug.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>
#include <linux/init.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif

/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	preempt_disable();				\
	&__get_cpu_var(var); }))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var) do {				\
	(void)&(var);					\
	preempt_enable();				\
} while (0)

#define get_cpu_ptr(var) ({				\
	preempt_disable();				\
	this_cpu_ptr(var); })

#define put_cpu_ptr(var) do {				\
	(void)(var);					\
	preempt_enable();				\
} while (0)
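
/*
 * Illustrative usage sketch (not part of this header): get_cpu_var() and
 * get_cpu_ptr() disable preemption so the caller stays on one CPU until the
 * matching put_cpu_var()/put_cpu_ptr().  The per-cpu variable and the
 * dynamically allocated pointer below are hypothetical examples.
 *
 *	DEFINE_PER_CPU(int, my_counter);
 *
 *	void bump_my_counter(void)
 *	{
 *		get_cpu_var(my_counter)++;	// preemption disabled here
 *		put_cpu_var(my_counter);	// re-enable preemption
 *	}
 *
 *	struct my_stats __percpu *stats;	// from alloc_percpu()
 *
 *	void bump_stats(void)
 *	{
 *		struct my_stats *s = get_cpu_ptr(stats);
 *
 *		s->events++;
 *		put_cpu_ptr(stats);
 *	}
 */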

/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)

/*
 * The percpu allocator can serve percpu allocations before slab is
 * initialized, which allows slab to depend on the percpu allocator.
 * The following two parameters decide how much to preallocate for
 * this.  Keep PERCPU_DYNAMIC_RESERVE equal to or larger than
 * PERCPU_DYNAMIC_EARLY_SIZE.
 */
#define PERCPU_DYNAMIC_EARLY_SLOTS	128
#define PERCPU_DYNAMIC_EARLY_SIZE	(12 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if arch is
 * manually allocating and mapping it for faster access (as a part of
 * large page mapping for example).
 *
 * The following values give between one and two pages of free space
 * after typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32.  A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif

extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
	int			nr_units;	/* aligned # of units */
	unsigned long		base_offset;	/* base address offset */
	unsigned int		*cpu_map;	/* unit->cpu map, empty
						 * entries contain NR_CPUS */
};

struct pcpu_alloc_info {
	size_t			static_size;
	size_t			reserved_size;
	size_t			dyn_size;
	size_t			unit_size;
	size_t			atom_size;
	size_t			alloc_size;
	size_t			__ai_size;	/* internal, don't use */
	int			nr_groups;	/* 0 if grouping unnecessary */
	struct pcpu_group_info	groups[];
};

enum pcpu_fc {
	PCPU_FC_AUTO,
	PCPU_FC_EMBED,
	PCPU_FC_PAGE,

	PCPU_FC_NR,
};
extern const char * const pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;

typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
				     size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
							     int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
					 void *base_addr);

#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn);
#endif

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern int __init pcpu_page_first_chunk(size_t reserved_size,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif
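
/*
 * Illustrative sketch (not part of this header) of how an arch's
 * setup_per_cpu_areas() might wire up the embed first-chunk helper.
 * The callback names pcpu_fc_alloc()/pcpu_fc_free(), the particular
 * memblock helpers and the __per_cpu_offset fixup are assumptions made
 * for the example; an arch's real implementation may differ.
 *
 *	static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
 *					   size_t align)
 *	{
 *		// boot-time allocation; NUMA-aware variants are possible
 *		return memblock_virt_alloc_from_nopanic(size, align,
 *							__pa(MAX_DMA_ADDRESS));
 *	}
 *
 *	static void __init pcpu_fc_free(void *ptr, size_t size)
 *	{
 *		memblock_free_early(__pa(ptr), size);
 *	}
 *
 *	void __init setup_per_cpu_areas(void)
 *	{
 *		unsigned long delta;
 *		unsigned int cpu;
 *		int rc;
 *
 *		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *					    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
 *					    NULL, pcpu_fc_alloc, pcpu_fc_free);
 *		if (rc < 0)
 *			panic("Failed to initialize percpu areas.");
 *
 *		delta = (unsigned long)pcpu_base_addr -
 *			(unsigned long)__per_cpu_start;
 *		for_each_possible_cpu(cpu)
 *			__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
 *	}
 */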

/*
 * Use this to get to a cpu's version of the dynamically allocated
 * per-cpu object.  Non-atomic access to the current CPU's version
 * should probably be combined with get_cpu()/put_cpu().
 */
#ifdef CONFIG_SMP
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
#else
#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
#endif
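
/*
 * Illustrative sketch (not part of this header): summing a dynamically
 * allocated per-cpu counter across all possible CPUs.  The counter
 * pointer below is a hypothetical example.
 *
 *	unsigned long sum_counters(unsigned long __percpu *counter)
 *	{
 *		unsigned long sum = 0;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			sum += *per_cpu_ptr(counter, cpu);
 *		return sum;
 *	}
 */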

extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern bool is_kernel_percpu_address(unsigned long addr);

#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
extern void __init setup_per_cpu_areas(void);
#endif
extern void __init percpu_init_late(void);

extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

#define alloc_percpu(type)	\
	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
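
/*
 * Illustrative sketch (not part of this header): allocating, using and
 * freeing a dynamic per-cpu object.  struct my_stats is a hypothetical
 * example type.
 *
 *	struct my_stats {
 *		u64	packets;
 *		u64	bytes;
 *	};
 *
 *	struct my_stats __percpu *stats;
 *
 *	int init_stats(void)
 *	{
 *		stats = alloc_percpu(struct my_stats);
 *		if (!stats)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	void account(unsigned int len)
 *	{
 *		this_cpu_inc(stats->packets);
 *		this_cpu_add(stats->bytes, len);
 *	}
 *
 *	void exit_stats(void)
 *	{
 *		free_percpu(stats);
 *	}
 */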

/*
 * Branching function to split up a function into a set of functions that
 * are called for different scalar sizes of the objects handled.
 */

extern void __bad_size_call_parameter(void);

#ifdef CONFIG_DEBUG_PREEMPT
extern void __this_cpu_preempt_check(const char *op);
#else
static inline void __this_cpu_preempt_check(const char *op) { }
#endif

#define __pcpu_size_call_return(stem, variable)				\
({	typeof(variable) pscr_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable);break;			\
	case 2: pscr_ret__ = stem##2(variable);break;			\
	case 4: pscr_ret__ = stem##4(variable);break;			\
	case 8: pscr_ret__ = stem##8(variable);break;			\
	default:							\
		__bad_size_call_parameter();break;			\
	}								\
	pscr_ret__;							\
})

#define __pcpu_size_call_return2(stem, variable, ...)			\
({									\
	typeof(variable) pscr2_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})

/*
 * Special handling for cmpxchg_double.  cmpxchg_double is passed two
 * percpu variables.  The first has to be aligned to a double word
 * boundary and the second has to follow directly thereafter.
 * We enforce this on all architectures even if they don't support
 * a double cmpxchg instruction, since it's a cheap requirement, and it
 * avoids breaking the requirement for architectures with the instruction.
 */
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
({									\
	bool pdcrb_ret__;						\
	__verify_pcpu_ptr(&pcp1);					\
	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
	VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1)));		\
	VM_BUG_ON((unsigned long)(&pcp2) !=				\
		  (unsigned long)(&pcp1) + sizeof(pcp1));		\
	switch(sizeof(pcp1)) {						\
	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pdcrb_ret__;							\
})

#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
		case 1: stem##1(variable, __VA_ARGS__);break;		\
		case 2: stem##2(variable, __VA_ARGS__);break;		\
		case 4: stem##4(variable, __VA_ARGS__);break;		\
		case 8: stem##8(variable, __VA_ARGS__);break;		\
		default:						\
			__bad_size_call_parameter();break;		\
	}								\
} while (0)

/*
 * this_cpu operations (C) 2008-2013 Christoph Lameter <[email protected]>
 *
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access for other operations
 * on the *same* processor. The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The first group is used for accesses that must be done in a
 * preemption safe way since we know that the context is not preempt
 * safe. Interrupts may occur. If the interrupt modifies the variable
 * too then RMW actions will not be reliable.
 *
 * The arch code can provide optimized functions in two ways:
 *
 * 1. Override the function completely. F.e. define this_cpu_add().
 *    The arch must then ensure that the various scalar formats passed
 *    are handled correctly.
 *
 * 2. Provide functions for certain scalar sizes. F.e. provide
 *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
 *    sized RMW actions. If arch code does not provide operations for
 *    a scalar size then the fallback in the generic code will be
 *    used.
 */
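
/*
 * Illustrative sketch (not part of this header): with the size-call
 * macros above, a statement such as
 *
 *	DEFINE_PER_CPU(u32, hits);
 *	this_cpu_add(hits, 1);
 *
 * dispatches on sizeof(hits) == 4 and resolves to this_cpu_add_4(hits, 1).
 * An arch that defines this_cpu_add_4 in asm/percpu.h (typically as a
 * single interrupt-safe instruction) overrides the generic
 * irq-save/restore fallback below; otherwise the fallback is used.
 */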

#define _this_cpu_generic_read(pcp)					\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = *this_cpu_ptr(&(pcp));					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_read
# ifndef this_cpu_read_1
#  define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_2
#  define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_4
#  define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_8
#  define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
# endif
# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
#endif

#define _this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	*raw_cpu_ptr(&(pcp)) op val;					\
	raw_local_irq_restore(flags);					\
} while (0)

#ifndef this_cpu_write
# ifndef this_cpu_write_1
#  define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_2
#  define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_4
#  define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_8
#  define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
#endif

#ifndef this_cpu_add
# ifndef this_cpu_add_1
#  define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_2
#  define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_4
#  define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_8
#  define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, (pcp), (val))
#endif

#ifndef this_cpu_sub
# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(typeof(pcp))(val))
#endif

#ifndef this_cpu_inc
# define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
#endif

#ifndef this_cpu_dec
# define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)
#endif

#ifndef this_cpu_and
# ifndef this_cpu_and_1
#  define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_2
#  define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_4
#  define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_8
#  define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, (pcp), (val))
#endif

#ifndef this_cpu_or
# ifndef this_cpu_or_1
#  define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_2
#  define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_4
#  define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_8
#  define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
#endif

#define _this_cpu_generic_add_return(pcp, val)				\
({									\
	typeof(pcp) ret__;						\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	raw_cpu_add(pcp, val);						\
	ret__ = raw_cpu_read(pcp);					\
	raw_local_irq_restore(flags);					\
	ret__;								\
})

#ifndef this_cpu_add_return
# ifndef this_cpu_add_return_1
#  define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_2
#  define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_4
#  define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_8
#  define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#endif

#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
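
/*
 * Illustrative sketch (not part of this header): this_cpu_inc_return()
 * performs the increment and the read-back as one interrupt-safe RMW,
 * which is handy for things like per-cpu sequence numbers.  The variable
 * below is a hypothetical example.
 *
 *	DEFINE_PER_CPU(unsigned int, msg_seq);
 *
 *	unsigned int next_msg_seq(void)
 *	{
 *		return this_cpu_inc_return(msg_seq);
 *	}
 */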

#define _this_cpu_generic_xchg(pcp, nval)				\
({	typeof(pcp) ret__;						\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	ret__ = raw_cpu_read(pcp);					\
	raw_cpu_write(pcp, nval);					\
	raw_local_irq_restore(flags);					\
	ret__;								\
})

#ifndef this_cpu_xchg
# ifndef this_cpu_xchg_1
#  define this_cpu_xchg_1(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_2
#  define this_cpu_xchg_2(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_4
#  define this_cpu_xchg_4(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_8
#  define this_cpu_xchg_8(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# define this_cpu_xchg(pcp, nval)	\
	__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
#endif

#define _this_cpu_generic_cmpxchg(pcp, oval, nval)			\
({									\
	typeof(pcp) ret__;						\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	ret__ = raw_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		raw_cpu_write(pcp, nval);				\
	raw_local_irq_restore(flags);					\
	ret__;								\
})

#ifndef this_cpu_cmpxchg
# ifndef this_cpu_cmpxchg_1
#  define this_cpu_cmpxchg_1(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_2
#  define this_cpu_cmpxchg_2(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_4
#  define this_cpu_cmpxchg_4(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_8
#  define this_cpu_cmpxchg_8(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define this_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
#endif
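
/*
 * Illustrative sketch (not part of this header): a cmpxchg loop that
 * tracks a per-cpu maximum without the caller disabling interrupts.
 * The variable below is a hypothetical example.
 *
 *	DEFINE_PER_CPU(unsigned long, max_latency);
 *
 *	void note_latency(unsigned long lat)
 *	{
 *		unsigned long old = this_cpu_read(max_latency);
 *		unsigned long prev;
 *
 *		while (lat > old) {
 *			prev = this_cpu_cmpxchg(max_latency, old, lat);
 *			if (prev == old)
 *				break;		// we installed lat
 *			old = prev;		// raced, retry with new value
 *		}
 *	}
 */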

/*
 * cmpxchg_double replaces two adjacent scalars at once.  The first
 * two parameters are per cpu variables which have to be of the same
 * size.  A truth value is returned to indicate success or failure
 * (since a double register result is difficult to handle).  There is
 * very limited hardware support for these operations, so only certain
 * sizes may work.
 */
#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int ret__;							\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	ret__ = raw_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
			oval1, oval2, nval1, nval2);			\
	raw_local_irq_restore(flags);					\
	ret__;								\
})

#ifndef this_cpu_cmpxchg_double
# ifndef this_cpu_cmpxchg_double_1
#  define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_2
#  define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_4
#  define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_8
#  define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif
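
/*
 * Illustrative sketch (not part of this header): the two per-cpu members
 * passed to this_cpu_cmpxchg_double() must be adjacent, of equal size,
 * and the pair must be aligned to twice the member size (see the
 * VM_BUG_ON checks above).  The struct and function below are
 * hypothetical examples.
 *
 *	struct freelist {
 *		void		*head;		// first operand
 *		unsigned long	tid;		// must directly follow head
 *	} __aligned(2 * sizeof(void *));
 *
 *	DEFINE_PER_CPU_ALIGNED(struct freelist, fl);
 *
 *	bool try_swap(void *old_head, unsigned long old_tid,
 *		      void *new_head, unsigned long new_tid)
 *	{
 *		return this_cpu_cmpxchg_double(fl.head, fl.tid,
 *					       old_head, old_tid,
 *					       new_head, new_tid);
 *	}
 */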

/*
 * Generic percpu operations for contexts where we do not want to do
 * any checks for preemption.
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts then one of these RMW operations can show unexpected
 * behavior because the execution thread was rescheduled on another processor
 * or an interrupt occurred and the same percpu variable was modified from
 * the interrupt context.
 */
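
/*
 * Illustrative sketch (not part of this header): raw_cpu_*() is
 * appropriate when the caller has already made the section atomic with
 * respect to preemption and interrupts, e.g. under local_irq_save().
 * The variable below is a hypothetical example.
 *
 *	DEFINE_PER_CPU(unsigned long, pending);
 *
 *	void note_pending(void)
 *	{
 *		unsigned long flags;
 *
 *		local_irq_save(flags);
 *		raw_cpu_inc(pending);		// no preemption/irq checks
 *		local_irq_restore(flags);
 *	}
 */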
#ifndef raw_cpu_read
# ifndef raw_cpu_read_1
#  define raw_cpu_read_1(pcp)	(*raw_cpu_ptr(&(pcp)))
# endif
# ifndef raw_cpu_read_2
#  define raw_cpu_read_2(pcp)	(*raw_cpu_ptr(&(pcp)))
# endif
# ifndef raw_cpu_read_4
#  define raw_cpu_read_4(pcp)	(*raw_cpu_ptr(&(pcp)))
# endif
# ifndef raw_cpu_read_8
#  define raw_cpu_read_8(pcp)	(*raw_cpu_ptr(&(pcp)))
# endif
# define raw_cpu_read(pcp)	__pcpu_size_call_return(raw_cpu_read_, (pcp))
#endif

#define raw_cpu_generic_to_op(pcp, val, op)				\
do {									\
	*raw_cpu_ptr(&(pcp)) op val;					\
} while (0)

#ifndef raw_cpu_write
# ifndef raw_cpu_write_1
#  define raw_cpu_write_1(pcp, val)	raw_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef raw_cpu_write_2
#  define raw_cpu_write_2(pcp, val)	raw_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef raw_cpu_write_4
#  define raw_cpu_write_4(pcp, val)	raw_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef raw_cpu_write_8
#  define raw_cpu_write_8(pcp, val)	raw_cpu_generic_to_op((pcp), (val), =)
# endif
# define raw_cpu_write(pcp, val)	__pcpu_size_call(raw_cpu_write_, (pcp), (val))
#endif

#ifndef raw_cpu_add
# ifndef raw_cpu_add_1
#  define raw_cpu_add_1(pcp, val)	raw_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef raw_cpu_add_2
#  define raw_cpu_add_2(pcp, val)	raw_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef raw_cpu_add_4
#  define raw_cpu_add_4(pcp, val)	raw_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef raw_cpu_add_8
#  define raw_cpu_add_8(pcp, val)	raw_cpu_generic_to_op((pcp), (val), +=)
# endif
# define raw_cpu_add(pcp, val)	__pcpu_size_call(raw_cpu_add_, (pcp), (val))
#endif

#ifndef raw_cpu_sub
# define raw_cpu_sub(pcp, val)	raw_cpu_add((pcp), -(typeof(pcp))(val))
#endif

#ifndef raw_cpu_inc
# define raw_cpu_inc(pcp)		raw_cpu_add((pcp), 1)
#endif

#ifndef raw_cpu_dec
# define raw_cpu_dec(pcp)		raw_cpu_sub((pcp), 1)
#endif

#ifndef raw_cpu_and
# ifndef raw_cpu_and_1
#  define raw_cpu_and_1(pcp, val)	raw_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef raw_cpu_and_2
#  define raw_cpu_and_2(pcp, val)	raw_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef raw_cpu_and_4
#  define raw_cpu_and_4(pcp, val)	raw_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef raw_cpu_and_8
#  define raw_cpu_and_8(pcp, val)	raw_cpu_generic_to_op((pcp), (val), &=)
# endif
# define raw_cpu_and(pcp, val)	__pcpu_size_call(raw_cpu_and_, (pcp), (val))
#endif

#ifndef raw_cpu_or
# ifndef raw_cpu_or_1
#  define raw_cpu_or_1(pcp, val)	raw_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef raw_cpu_or_2
#  define raw_cpu_or_2(pcp, val)	raw_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef raw_cpu_or_4
#  define raw_cpu_or_4(pcp, val)	raw_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef raw_cpu_or_8
#  define raw_cpu_or_8(pcp, val)	raw_cpu_generic_to_op((pcp), (val), |=)
# endif
# define raw_cpu_or(pcp, val)	__pcpu_size_call(raw_cpu_or_, (pcp), (val))
#endif

#define raw_cpu_generic_add_return(pcp, val)				\
({									\
	raw_cpu_add(pcp, val);						\
	raw_cpu_read(pcp);						\
})

#ifndef raw_cpu_add_return
# ifndef raw_cpu_add_return_1
#  define raw_cpu_add_return_1(pcp, val)	raw_cpu_generic_add_return(pcp, val)
# endif
# ifndef raw_cpu_add_return_2
#  define raw_cpu_add_return_2(pcp, val)	raw_cpu_generic_add_return(pcp, val)
# endif
# ifndef raw_cpu_add_return_4
#  define raw_cpu_add_return_4(pcp, val)	raw_cpu_generic_add_return(pcp, val)
# endif
# ifndef raw_cpu_add_return_8
#  define raw_cpu_add_return_8(pcp, val)	raw_cpu_generic_add_return(pcp, val)
# endif
# define raw_cpu_add_return(pcp, val)	\
	__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
#endif

#define raw_cpu_sub_return(pcp, val)	raw_cpu_add_return(pcp, -(typeof(pcp))(val))
#define raw_cpu_inc_return(pcp)	raw_cpu_add_return(pcp, 1)
#define raw_cpu_dec_return(pcp)	raw_cpu_add_return(pcp, -1)

#define raw_cpu_generic_xchg(pcp, nval)					\
({	typeof(pcp) ret__;						\
	ret__ = raw_cpu_read(pcp);					\
	raw_cpu_write(pcp, nval);					\
	ret__;								\
})

#ifndef raw_cpu_xchg
# ifndef raw_cpu_xchg_1
#  define raw_cpu_xchg_1(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
# endif
# ifndef raw_cpu_xchg_2
#  define raw_cpu_xchg_2(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
# endif
# ifndef raw_cpu_xchg_4
#  define raw_cpu_xchg_4(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
# endif
# ifndef raw_cpu_xchg_8
#  define raw_cpu_xchg_8(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
# endif
# define raw_cpu_xchg(pcp, nval)	\
	__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval)
#endif

#define raw_cpu_generic_cmpxchg(pcp, oval, nval)			\
({									\
	typeof(pcp) ret__;						\
	ret__ = raw_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		raw_cpu_write(pcp, nval);				\
	ret__;								\
})

#ifndef raw_cpu_cmpxchg
# ifndef raw_cpu_cmpxchg_1
#  define raw_cpu_cmpxchg_1(pcp, oval, nval)	raw_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef raw_cpu_cmpxchg_2
#  define raw_cpu_cmpxchg_2(pcp, oval, nval)	raw_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef raw_cpu_cmpxchg_4
#  define raw_cpu_cmpxchg_4(pcp, oval, nval)	raw_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef raw_cpu_cmpxchg_8
#  define raw_cpu_cmpxchg_8(pcp, oval, nval)	raw_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define raw_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
#endif

#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int __ret = 0;							\
	if (raw_cpu_read(pcp1) == (oval1) &&				\
			raw_cpu_read(pcp2) == (oval2)) {		\
		raw_cpu_write(pcp1, (nval1));				\
		raw_cpu_write(pcp2, (nval2));				\
		__ret = 1;						\
	}								\
	(__ret);							\
})

#ifndef raw_cpu_cmpxchg_double
# ifndef raw_cpu_cmpxchg_double_1
#  define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef raw_cpu_cmpxchg_double_2
#  define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef raw_cpu_cmpxchg_double_4
#  define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef raw_cpu_cmpxchg_double_8
#  define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif

/*
 * Generic percpu operations for contexts that are safe from preemption/interrupts.
 */
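
/*
 * Illustrative sketch (not part of this header): __this_cpu_*() expects
 * the caller to have already disabled preemption (or interrupts); with
 * CONFIG_DEBUG_PREEMPT the __this_cpu_preempt_check() calls below
 * complain otherwise.  The variable is a hypothetical example.
 *
 *	DEFINE_PER_CPU(unsigned long, nr_events);
 *
 *	void count_event(void)
 *	{
 *		preempt_disable();
 *		__this_cpu_inc(nr_events);
 *		preempt_enable();
 *	}
 */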
#ifndef __this_cpu_read
# define __this_cpu_read(pcp) \
	(__this_cpu_preempt_check("read"),__pcpu_size_call_return(raw_cpu_read_, (pcp)))
#endif

#ifndef __this_cpu_write
# define __this_cpu_write(pcp, val)					\
do { __this_cpu_preempt_check("write");					\
     __pcpu_size_call(raw_cpu_write_, (pcp), (val));			\
} while (0)
#endif

#ifndef __this_cpu_add
# define __this_cpu_add(pcp, val)					\
do { __this_cpu_preempt_check("add");					\
	__pcpu_size_call(raw_cpu_add_, (pcp), (val));			\
} while (0)
#endif

#ifndef __this_cpu_sub
# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(typeof(pcp))(val))
#endif

#ifndef __this_cpu_inc
# define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
#endif

#ifndef __this_cpu_dec
# define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)
#endif

#ifndef __this_cpu_and
# define __this_cpu_and(pcp, val)					\
do { __this_cpu_preempt_check("and");					\
	__pcpu_size_call(raw_cpu_and_, (pcp), (val));			\
} while (0)
#endif

#ifndef __this_cpu_or
# define __this_cpu_or(pcp, val)					\
do { __this_cpu_preempt_check("or");					\
	__pcpu_size_call(raw_cpu_or_, (pcp), (val));			\
} while (0)
#endif

#ifndef __this_cpu_add_return
# define __this_cpu_add_return(pcp, val)	\
	(__this_cpu_preempt_check("add_return"),__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val))
#endif

#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)

#ifndef __this_cpu_xchg
# define __this_cpu_xchg(pcp, nval)	\
	(__this_cpu_preempt_check("xchg"),__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval))
#endif

#ifndef __this_cpu_cmpxchg
# define __this_cpu_cmpxchg(pcp, oval, nval)	\
	(__this_cpu_preempt_check("cmpxchg"),__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval))
#endif

#ifndef __this_cpu_cmpxchg_double
# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	(__this_cpu_preempt_check("cmpxchg_double"),__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)))
#endif

#endif /* __LINUX_PERCPU_H */