xref: /linux-6.15/include/linux/percpu-defs.h (revision 6fbc07bb)
/*
 * linux/percpu-defs.h - basic definitions for percpu areas
 *
 * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER.
 *
 * This file is separate from linux/percpu.h to avoid cyclic inclusion
 * dependency from arch header files.  Only to be included from
 * asm/percpu.h.
 *
 * This file includes macros necessary to declare percpu sections and
 * variables, and definitions of percpu accessors and operations.  It
 * should provide enough percpu features to arch header files even when
 * they can only include asm/percpu.h to avoid cyclic inclusion dependency.
 */

#ifndef _LINUX_PERCPU_DEFS_H
#define _LINUX_PERCPU_DEFS_H

#ifdef CONFIG_SMP

#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION "..first"

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_FIRST_SECTION ""

#endif

/*
 * Base implementations of per-CPU variable declarations and definitions, where
 * the section in which the variable is to be placed is provided by the
 * 'sec' argument.  This may be used to affect the parameters governing the
 * variable's storage.
 *
 * NOTE!  The sections for the DECLARE and for the DEFINE must match, lest
 * linkage errors occur due to the compiler generating the wrong code to
 * access that section.
 */
#define __PCPU_ATTRS(sec)						\
	__percpu __attribute__((section(PER_CPU_BASE_SECTION sec)))	\
	PER_CPU_ATTRIBUTES

#define __PCPU_DUMMY_ATTRS						\
	__attribute__((section(".discard"), unused))

/*
 * s390 and alpha modules require percpu variables to be defined as
 * weak to force the compiler to generate GOT based external
 * references for them.  This is necessary because percpu sections
 * will be located outside of the usually addressable area.
 *
 * This definition puts the following two extra restrictions on
 * defining percpu variables.
 *
 * 1. The symbol must be globally unique, even for static variables.
 * 2. Static percpu variables cannot be defined inside a function.
 *
 * Archs which need weak percpu definitions should define
 * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
 *
 * To ensure that the generic code observes the above two restrictions,
 * the weak definition is used for all cases if
 * CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set.
 */
#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
/*
 * The __pcpu_scope_* dummy variable is used to enforce scope.  It
 * receives the static modifier when it's used in front of
 * DEFINE_PER_CPU() and will trigger a build failure if
 * DECLARE_PER_CPU() is used for the same variable.
 *
 * The __pcpu_unique_* dummy variable is used to enforce symbol uniqueness
 * such that a hidden weak symbol collision, which would cause unrelated
 * variables to share the same address, can be detected during build.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;		\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_DUMMY_ATTRS char __pcpu_scope_##name;			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;		\
	__PCPU_DUMMY_ATTRS char __pcpu_unique_##name;			\
	extern __PCPU_ATTRS(sec) __typeof__(type) name;			\
	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak			\
	__typeof__(type) name
#else
/*
 * Normal declaration and definition macros.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES			\
	__typeof__(type) name
#endif

/*
 * Variant on the per-CPU variable declaration/definition theme used for
 * ordinary per-CPU variables.
 */
#define DECLARE_PER_CPU(type, name)					\
	DECLARE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")

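/*
 * For illustration only (not part of the original header): a minimal,
 * hypothetical usage sketch.  The variable name "my_counter" is made up;
 * the macros and accessors are the ones defined in this file and in
 * linux/percpu.h.
 *
 *	// in a header shared by users of the variable:
 *	DECLARE_PER_CPU(int, my_counter);
 *
 *	// in exactly one .c file:
 *	DEFINE_PER_CPU(int, my_counter);
 *
 *	// reading another CPU's instance (see per_cpu() below):
 *	int v = per_cpu(my_counter, cpu);
 */
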
/*
 * Declaration/definition used for per-CPU variables that must come first in
 * the set of variables.
 */
#define DECLARE_PER_CPU_FIRST(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define DEFINE_PER_CPU_FIRST(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

/*
 * Declaration/definition used for per-CPU variables that must be cacheline
 * aligned under SMP conditions so that, whilst a particular instance of the
 * data corresponds to a particular CPU, inefficiencies due to direct access by
 * other CPUs are reduced by preventing the data from unnecessarily spanning
 * cachelines.
 *
 * An example of this would be statistical data, where each CPU's set of data
 * is updated by that CPU alone, but the data from across all CPUs is collated
 * by a CPU processing a read from a proc file.
 */
#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

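/*
 * For illustration only: a hypothetical statistics block matching the
 * example described above.  "struct my_stats" and "my_stats" are made-up
 * names used purely as a sketch.
 *
 *	struct my_stats {
 *		unsigned long	packets;
 *		unsigned long	bytes;
 *	};
 *	DEFINE_PER_CPU_SHARED_ALIGNED(struct my_stats, my_stats);
 *
 *	// each CPU updates only its own copy ...
 *	this_cpu_inc(my_stats.packets);
 *
 *	// ... while a reader collates all copies:
 *	int cpu;
 *	unsigned long total = 0;
 *	for_each_possible_cpu(cpu)
 *		total += per_cpu(my_stats.packets, cpu);
 */
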
#define DECLARE_PER_CPU_ALIGNED(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

#define DEFINE_PER_CPU_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

/*
 * Declaration/definition used for per-CPU variables that must be page aligned.
 */
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)

/*
 * Declaration/definition used for per-CPU variables that must be read mostly.
 */
#define DECLARE_PER_CPU_READ_MOSTLY(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, "..readmostly")

#define DEFINE_PER_CPU_READ_MOSTLY(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..readmostly")

/*
 * Intermodule exports for per-CPU variables.  sparse forgets about the
 * address space across EXPORT_SYMBOL(), so EXPORT_SYMBOL() is turned into
 * a no-op when __CHECKER__ is defined.
 */
#ifndef __CHECKER__
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
#else
#define EXPORT_PER_CPU_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var)
#endif

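/*
 * For illustration only: a hypothetical variable made visible to modules.
 * "cpu_number_example" is a made-up name; real code pairs the DEFINE with
 * the export in the same translation unit.
 *
 *	DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number_example);
 *	EXPORT_PER_CPU_SYMBOL(cpu_number_example);
 */
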
/*
 * Accessors and operations.
 */
#ifndef __ASSEMBLY__

/*
 * __verify_pcpu_ptr() verifies that @ptr is a percpu pointer without
 * evaluating @ptr and is invoked once before a percpu area is accessed by
 * all accessors and operations.  This is performed in the generic part of
 * percpu and arch overrides don't need to worry about it; however, if an
 * arch wants to implement an arch-specific percpu accessor or operation,
 * it may use __verify_pcpu_ptr() to verify the parameters.
 *
 * + 0 is required in order to convert the pointer type from a
 * potential array type to a pointer to a single item of the array.
 */
#define __verify_pcpu_ptr(ptr)						\
do {									\
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
	(void)__vpp_verify;						\
} while (0)

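/*
 * For illustration only: because the dummy initializer assigns to a
 * 'const void __percpu *', passing a pointer that lacks the __percpu
 * address space is flagged by sparse without the expression ever being
 * evaluated.  "bad_ptr" is a made-up example.
 *
 *	int *bad_ptr = NULL;
 *	__verify_pcpu_ptr(bad_ptr);	// sparse: incorrect type in initializer
 */
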
#ifdef CONFIG_SMP

/*
 * Add an offset to a pointer but keep the pointer as-is.  Use RELOC_HIDE()
 * to prevent the compiler from making incorrect assumptions about the
 * pointer value.  The weird cast keeps both GCC and sparse happy.
 */
#define SHIFT_PERCPU_PTR(__p, __offset)					\
	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset))

#define per_cpu_ptr(ptr, cpu)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)));			\
})

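/*
 * For illustration only: per_cpu_ptr() is what makes dynamically allocated
 * percpu memory (from alloc_percpu() in linux/percpu.h) addressable per CPU.
 * "refs" is a made-up variable name.
 *
 *	int __percpu *refs = alloc_percpu(int);
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		*per_cpu_ptr(refs, cpu) = 0;
 */
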
#define raw_cpu_ptr(ptr)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	arch_raw_cpu_ptr(ptr);						\
})

#ifdef CONFIG_DEBUG_PREEMPT
#define this_cpu_ptr(ptr)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	SHIFT_PERCPU_PTR(ptr, my_cpu_offset);				\
})
#else
#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
#endif

#else	/* CONFIG_SMP */

#define VERIFY_PERCPU_PTR(__p)						\
({									\
	__verify_pcpu_ptr(__p);						\
	(typeof(*(__p)) __kernel __force *)(__p);			\
})

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define raw_cpu_ptr(ptr)	per_cpu_ptr(ptr, 0)
#define this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)

#endif	/* CONFIG_SMP */

#define per_cpu(var, cpu)	(*per_cpu_ptr(&(var), cpu))
#define __raw_get_cpu_var(var)	(*raw_cpu_ptr(&(var)))
#define __get_cpu_var(var)	(*this_cpu_ptr(&(var)))

/* keep until we have removed all uses of __this_cpu_ptr */
#define __this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)

/*
 * Must be an lvalue.  Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var)						\
(*({									\
	preempt_disable();						\
	this_cpu_ptr(&var);						\
}))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of the percpu variable @var.
 */
#define put_cpu_var(var)						\
do {									\
	(void)&(var);							\
	preempt_enable();						\
} while (0)

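/*
 * For illustration only: the canonical pairing, assuming a per-CPU variable
 * defined elsewhere as DEFINE_PER_CPU(int, my_counter) ("my_counter" is a
 * made-up name).  Preemption stays disabled between the two calls, so the
 * task cannot migrate while it touches its own CPU's copy.
 *
 *	get_cpu_var(my_counter)++;
 *	put_cpu_var(my_counter);
 */
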
#define get_cpu_ptr(var)						\
({									\
	preempt_disable();						\
	this_cpu_ptr(var);						\
})

#define put_cpu_ptr(var)						\
do {									\
	(void)(var);							\
	preempt_enable();						\
} while (0)

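/*
 * For illustration only: the pointer-based counterpart, typically used with
 * dynamically allocated percpu memory.  "refs" is the made-up variable from
 * the per_cpu_ptr() sketch above.
 *
 *	int *p = get_cpu_ptr(refs);
 *	(*p)++;
 *	put_cpu_ptr(refs);
 */
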
/*
 * Branching function to split up a function into a set of functions that
 * are called for different scalar sizes of the objects handled.
 */

extern void __bad_size_call_parameter(void);

#ifdef CONFIG_DEBUG_PREEMPT
extern void __this_cpu_preempt_check(const char *op);
#else
static inline void __this_cpu_preempt_check(const char *op) { }
#endif

#define __pcpu_size_call_return(stem, variable)				\
({									\
	typeof(variable) pscr_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable); break;			\
	case 2: pscr_ret__ = stem##2(variable); break;			\
	case 4: pscr_ret__ = stem##4(variable); break;			\
	case 8: pscr_ret__ = stem##8(variable); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr_ret__;							\
})

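/*
 * For illustration only: for a 4-byte percpu variable "x" (made-up name),
 *
 *	raw_cpu_read(x)
 *
 * (defined further down) goes through __pcpu_size_call_return(raw_cpu_read_, x)
 * and ends up calling the size-specific handler
 *
 *	raw_cpu_read_4(x)
 *
 * Unsupported sizes fail at link time via __bad_size_call_parameter().
 */
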
#define __pcpu_size_call_return2(stem, variable, ...)			\
({									\
	typeof(variable) pscr2_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})

/*
 * Special handling for cmpxchg_double.  cmpxchg_double is passed two
 * percpu variables.  The first has to be aligned to a double word
 * boundary and the second has to follow directly thereafter.
 * We enforce this on all architectures even if they don't support
 * a double cmpxchg instruction, since it's a cheap requirement, and it
 * avoids breaking the requirement for architectures with the instruction.
 */
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
({									\
	bool pdcrb_ret__;						\
	__verify_pcpu_ptr(&(pcp1));					\
	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
	VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1)));	\
	VM_BUG_ON((unsigned long)(&(pcp2)) !=				\
		  (unsigned long)(&(pcp1)) + sizeof(pcp1));		\
	switch(sizeof(pcp1)) {						\
	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pdcrb_ret__;							\
})

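/*
 * For illustration only: a made-up layout that satisfies the checks above.
 * Two unsigned longs are placed in one aligned struct so the first word is
 * double-word aligned and the second follows it directly.  "my_pair", the
 * old/new values and "ok" are all part of the sketch.
 *
 *	struct my_pair {
 *		unsigned long	lo;
 *		unsigned long	hi;
 *	} __aligned(2 * sizeof(unsigned long));
 *	DEFINE_PER_CPU(struct my_pair, my_pair);
 *
 *	bool ok = this_cpu_cmpxchg_double(my_pair.lo, my_pair.hi,
 *					  old_lo, old_hi, new_lo, new_hi);
 */
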
#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: stem##1(variable, __VA_ARGS__); break;			\
	case 2: stem##2(variable, __VA_ARGS__); break;			\
	case 4: stem##4(variable, __VA_ARGS__); break;			\
	case 8: stem##8(variable, __VA_ARGS__); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
} while (0)

/*
 * this_cpu operations (C) 2008-2013 Christoph Lameter <[email protected]>
 *
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access for other operations
 * on the *same* processor.  The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The arch code can provide optimized implementations by defining macros
 * for certain scalar sizes.  E.g. provide this_cpu_add_2() to provide per
 * cpu atomic operations for 2 byte sized RMW actions.  If arch code does
 * not provide operations for a scalar size then the fallback in the
 * generic code will be used.
 *
 * cmpxchg_double replaces two adjacent scalars at once.  The first two
 * parameters are per cpu variables which have to be of the same size.  A
 * truth value is returned to indicate success or failure (since a double
 * register result is difficult to handle).  There is very limited hardware
 * support for these operations, so only certain sizes may work.
 */

/*
 * Operations for contexts where we do not want to do any checks for
 * preemption.  Unless strictly necessary, always use [__]this_cpu_*()
 * instead.
 *
 * If there is no other protection through preempt disable and/or disabling
 * interrupts then one of these RMW operations can show unexpected behavior
 * because the execution thread was rescheduled on another processor or an
 * interrupt occurred and the same percpu variable was modified from the
 * interrupt context.
 */
#define raw_cpu_read(pcp)		__pcpu_size_call_return(raw_cpu_read_, pcp)
#define raw_cpu_write(pcp, val)		__pcpu_size_call(raw_cpu_write_, pcp, val)
#define raw_cpu_add(pcp, val)		__pcpu_size_call(raw_cpu_add_, pcp, val)
#define raw_cpu_and(pcp, val)		__pcpu_size_call(raw_cpu_and_, pcp, val)
#define raw_cpu_or(pcp, val)		__pcpu_size_call(raw_cpu_or_, pcp, val)
#define raw_cpu_add_return(pcp, val)	__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
#define raw_cpu_xchg(pcp, nval)		__pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval)
#define raw_cpu_cmpxchg(pcp, oval, nval) \
	__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
#define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
	__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)

#define raw_cpu_sub(pcp, val)		raw_cpu_add(pcp, -(val))
#define raw_cpu_inc(pcp)		raw_cpu_add(pcp, 1)
#define raw_cpu_dec(pcp)		raw_cpu_sub(pcp, 1)
#define raw_cpu_sub_return(pcp, val)	raw_cpu_add_return(pcp, -(typeof(pcp))(val))
#define raw_cpu_inc_return(pcp)		raw_cpu_add_return(pcp, 1)
#define raw_cpu_dec_return(pcp)		raw_cpu_add_return(pcp, -1)

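/*
 * For illustration only: raw_cpu_*() is typically reserved for paths that
 * already run with preemption and/or interrupts disabled, for example with
 * local interrupts off.  "my_counter" is a made-up per-CPU variable name.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	raw_cpu_inc(my_counter);	// no preemption check, caller is safe
 *	local_irq_restore(flags);
 */
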
/*
 * Operations for contexts that are safe from preemption/interrupts.  These
 * operations verify that preemption is disabled.
 */
#define __this_cpu_read(pcp)						\
({									\
	__this_cpu_preempt_check("read");				\
	raw_cpu_read(pcp);						\
})

#define __this_cpu_write(pcp, val)					\
({									\
	__this_cpu_preempt_check("write");				\
	raw_cpu_write(pcp, val);					\
})

#define __this_cpu_add(pcp, val)					\
({									\
	__this_cpu_preempt_check("add");				\
	raw_cpu_add(pcp, val);						\
})

#define __this_cpu_and(pcp, val)					\
({									\
	__this_cpu_preempt_check("and");				\
	raw_cpu_and(pcp, val);						\
})

#define __this_cpu_or(pcp, val)						\
({									\
	__this_cpu_preempt_check("or");					\
	raw_cpu_or(pcp, val);						\
})

#define __this_cpu_add_return(pcp, val)					\
({									\
	__this_cpu_preempt_check("add_return");				\
	raw_cpu_add_return(pcp, val);					\
})

#define __this_cpu_xchg(pcp, nval)					\
({									\
	__this_cpu_preempt_check("xchg");				\
	raw_cpu_xchg(pcp, nval);					\
})

#define __this_cpu_cmpxchg(pcp, oval, nval)				\
({									\
	__this_cpu_preempt_check("cmpxchg");				\
	raw_cpu_cmpxchg(pcp, oval, nval);				\
})

#define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
({	__this_cpu_preempt_check("cmpxchg_double");			\
	raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2);	\
})

#define __this_cpu_sub(pcp, val)	__this_cpu_add(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc(pcp)		__this_cpu_add(pcp, 1)
#define __this_cpu_dec(pcp)		__this_cpu_sub(pcp, 1)
#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)

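/*
 * For illustration only: __this_cpu_*() is for callers that have already
 * disabled preemption themselves; with CONFIG_DEBUG_PREEMPT the
 * __this_cpu_preempt_check() call above will complain otherwise.
 * "my_counter" is a made-up per-CPU variable name.
 *
 *	preempt_disable();
 *	__this_cpu_inc(my_counter);
 *	preempt_enable();
 */
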
/*
 * Operations with implied preemption protection.  These operations can be
 * used without worrying about preemption.  Note that interrupts may still
 * occur while an operation is in progress and, if the interrupt modifies
 * the variable too, then RMW actions may not be reliable.
 */
#define this_cpu_read(pcp)		__pcpu_size_call_return(this_cpu_read_, pcp)
#define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, pcp, val)
#define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, pcp, val)
#define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, pcp, val)
#define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, pcp, val)
#define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#define this_cpu_xchg(pcp, nval)	__pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)
#define this_cpu_cmpxchg(pcp, oval, nval) \
	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)

#define this_cpu_sub(pcp, val)		this_cpu_add(pcp, -(typeof(pcp))(val))
#define this_cpu_inc(pcp)		this_cpu_add(pcp, 1)
#define this_cpu_dec(pcp)		this_cpu_sub(pcp, 1)
#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)

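/*
 * For illustration only: this_cpu_*() can be used from any context because
 * each operation protects itself against preemption (or, on architectures
 * that can do the whole RMW in one instruction relative to this CPU, needs
 * no extra protection at all).  "my_counter" is a made-up per-CPU variable.
 *
 *	this_cpu_inc(my_counter);	// safe without explicit preempt_disable()
 */
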
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_PERCPU_DEFS_H */