xref: /linux-6.15/include/linux/cache.h (revision 0704bf43)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CACHE_H
#define __LINUX_CACHE_H

#include <uapi/linux/kernel.h>
#include <vdso/cache.h>
#include <asm/cache.h>

#ifndef L1_CACHE_ALIGN
#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
#endif

/**
 * SMP_CACHE_ALIGN - align a value to the L2 cacheline size
 * @x: value to align
 *
 * On some architectures, the L2 ("SMP") cacheline size is bigger than the L1
 * one, and sometimes this needs to be accounted for.
 *
 * Return: aligned value.
 */
#ifndef SMP_CACHE_ALIGN
#define SMP_CACHE_ALIGN(x)	ALIGN(x, SMP_CACHE_BYTES)
#endif

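/*
 * Illustrative sketch only (the length variable is hypothetical): rounding a
 * buffer length up to a full SMP cacheline:
 *
 *	size_t len = SMP_CACHE_ALIGN(payload_len);
 */
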
/*
 * ``__aligned_largest`` aligns a field to the alignment most optimal for the
 * target architecture's memory operations. ``__LARGEST_ALIGN`` captures the
 * actual value so that it can be used anywhere else.
 */
#ifndef __LARGEST_ALIGN
#define __LARGEST_ALIGN		sizeof(struct { long x; } __aligned_largest)
#endif

#ifndef LARGEST_ALIGN
#define LARGEST_ALIGN(x)	ALIGN(x, __LARGEST_ALIGN)
#endif

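/*
 * Illustrative sketch only (the struct name is hypothetical): padding a
 * private-area size so a field placed after it keeps the largest alignment:
 *
 *	size_t priv_len = LARGEST_ALIGN(sizeof(struct foo_priv));
 */
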
/*
 * __read_mostly is used to keep rarely changing variables out of frequently
 * updated cachelines. Its use should be reserved for data that is used
 * frequently in hot paths. Performance traces can help decide when to use
 * this. You want __read_mostly data to be tightly packed, so that in the
 * best case multiple frequently read variables for a hot path end up next
 * to each other, reducing the number of cachelines needed to execute a
 * critical path. Be mindful and selective in its use, i.e. if you are going
 * to use it, please supply a *good* justification in your commit log.
 */
#ifndef __read_mostly
#define __read_mostly
#endif

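/*
 * Illustrative example (the variable name is hypothetical): a flag written
 * once at boot and then only read in hot paths:
 *
 *	static bool foo_fast_path_enabled __read_mostly;
 */
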
/*
 * __ro_after_init is used to mark things that are read-only after init (i.e.
 * after mark_rodata_ro() has been called). These are effectively read-only,
 * but may get written to during init, so can't live in .rodata (via "const").
 */
#ifndef __ro_after_init
#define __ro_after_init __section(".data..ro_after_init")
#endif

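/*
 * Illustrative example (the variable name is hypothetical): a feature mask
 * filled in from __init code and never written again afterwards:
 *
 *	static unsigned long foo_feature_mask __ro_after_init;
 */
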
#ifndef ____cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define ____cacheline_aligned_in_smp ____cacheline_aligned
#else
#define ____cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif

#ifndef __cacheline_aligned
#define __cacheline_aligned					\
  __attribute__((__aligned__(SMP_CACHE_BYTES),			\
		 __section__(".data..cacheline_aligned")))
#endif /* __cacheline_aligned */

#ifndef __cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define __cacheline_aligned_in_smp __cacheline_aligned
#else
#define __cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif

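/*
 * Illustrative sketch (the struct and field names are hypothetical): on SMP,
 * start a frequently written lock on its own cacheline so it does not bounce
 * together with the read-mostly counter:
 *
 *	struct foo_queue {
 *		u64		rx_packets;
 *		spinlock_t	lock ____cacheline_aligned_in_smp;
 *	};
 */
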
/*
 * The maximum alignment needed for some critical structures. These could be
 * the inter-node cacheline size, the L3 cacheline size, etc. Define this in
 * asm/cache.h for your arch.
 */
#ifndef INTERNODE_CACHE_SHIFT
#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
#endif

#if !defined(____cacheline_internodealigned_in_smp)
#if defined(CONFIG_SMP)
#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
#else
#define ____cacheline_internodealigned_in_smp
#endif
#endif

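/*
 * Illustrative sketch (the struct and field names are hypothetical): keep a
 * per-node structure on its own internode cacheline:
 *
 *	struct foo_node_state {
 *		unsigned long	nr_events;
 *	} ____cacheline_internodealigned_in_smp;
 */
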
#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
#define cache_line_size()	L1_CACHE_BYTES
#endif

#ifndef __cacheline_group_begin
#define __cacheline_group_begin(GROUP) \
	__u8 __cacheline_group_begin__##GROUP[0]
#endif

#ifndef __cacheline_group_end
#define __cacheline_group_end(GROUP) \
	__u8 __cacheline_group_end__##GROUP[0]
#endif

/**
 * __cacheline_group_begin_aligned - declare an aligned group start
 * @GROUP: name of the group
 * @...: optional group alignment
 *
 * The following block inside a struct:
 *
 *	__cacheline_group_begin_aligned(grp);
 *	field a;
 *	field b;
 *	__cacheline_group_end_aligned(grp);
 *
 * will always be aligned to either the specified alignment or
 * ``SMP_CACHE_BYTES``.
 */
#define __cacheline_group_begin_aligned(GROUP, ...)		\
	__cacheline_group_begin(GROUP)				\
	__aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES)

/**
 * __cacheline_group_end_aligned - declare an aligned group end
 * @GROUP: name of the group
 * @...: optional alignment (same as was in __cacheline_group_begin_aligned())
 *
 * Note that the end marker is aligned to sizeof(long) to allow a more precise
 * size assertion. It also declares padding at the end to avoid the next field
 * falling into this cacheline.
 */
#define __cacheline_group_end_aligned(GROUP, ...)		\
	__cacheline_group_end(GROUP) __aligned(sizeof(long));	\
	struct { } __cacheline_group_pad__##GROUP		\
	__aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES)

#ifndef CACHELINE_ASSERT_GROUP_MEMBER
#define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER) \
	BUILD_BUG_ON(!(offsetof(TYPE, MEMBER) >= \
		       offsetofend(TYPE, __cacheline_group_begin__##GROUP) && \
		       offsetofend(TYPE, MEMBER) <= \
		       offsetof(TYPE, __cacheline_group_end__##GROUP)))
#endif

#ifndef CACHELINE_ASSERT_GROUP_SIZE
#define CACHELINE_ASSERT_GROUP_SIZE(TYPE, GROUP, SIZE) \
	BUILD_BUG_ON(offsetof(TYPE, __cacheline_group_end__##GROUP) - \
		     offsetofend(TYPE, __cacheline_group_begin__##GROUP) > \
		     SIZE)
#endif

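/*
 * Illustrative sketch (struct, field and function names are hypothetical):
 * group the fast-path fields of a structure and assert at build time that
 * they all lie inside the group and fit into one SMP cacheline:
 *
 *	struct foo_dev {
 *		__cacheline_group_begin_aligned(fast_path);
 *		u32	flags;
 *		void	*ring;
 *		__cacheline_group_end_aligned(fast_path);
 *		u32	slow_path_cfg;
 *	};
 *
 *	static void foo_dev_struct_check(void)
 *	{
 *		CACHELINE_ASSERT_GROUP_MEMBER(struct foo_dev, fast_path, flags);
 *		CACHELINE_ASSERT_GROUP_MEMBER(struct foo_dev, fast_path, ring);
 *		CACHELINE_ASSERT_GROUP_SIZE(struct foo_dev, fast_path,
 *					    SMP_CACHE_BYTES);
 *	}
 */
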
/*
 * Helper to add padding within a struct to ensure data falls into separate
 * cachelines.
 */
#if defined(CONFIG_SMP)
struct cacheline_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define CACHELINE_PADDING(name)		struct cacheline_padding name
#else
#define CACHELINE_PADDING(name)
#endif

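/*
 * Illustrative sketch (the struct and field names are hypothetical): keep
 * two hot, independently updated counters from sharing a cacheline:
 *
 *	struct foo_counters {
 *		atomic_long_t	tx;
 *		CACHELINE_PADDING(_pad1_);
 *		atomic_long_t	rx;
 *	};
 */
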
#ifdef ARCH_DMA_MINALIGN
#define ARCH_HAS_DMA_MINALIGN
#else
#define ARCH_DMA_MINALIGN __alignof__(unsigned long long)
#endif

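/*
 * Illustrative sketch (the struct and field names are hypothetical): keep a
 * buffer targeted by non-coherent DMA away from CPU-written state that could
 * otherwise share its cacheline:
 *
 *	struct foo_cmd {
 *		unsigned long	cpu_state;
 *		u8		dma_buf[64] __aligned(ARCH_DMA_MINALIGN);
 *	};
 */
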
#endif /* __LINUX_CACHE_H */