/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/mmdebug.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>
#include <linux/init.h>
#include <linux/cleanup.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif
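
/*
 * The module reserve above backs static per-cpu variables declared in
 * modules via DEFINE_PER_CPU() (from <linux/percpu-defs.h>); they are
 * carved out of PERCPU_MODULE_RESERVE at module load time.  A minimal
 * sketch, with a hypothetical variable name:
 *
 *	static DEFINE_PER_CPU(unsigned long, my_mod_counter);
 *
 *	static void my_mod_event(void)
 *	{
 *		this_cpu_inc(my_mod_counter);
 *	}
 */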

/* minimum unit size; also the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)

/* minimum allocation size and shift in bytes */
#define PCPU_MIN_ALLOC_SHIFT		2
#define PCPU_MIN_ALLOC_SIZE		(1 << PCPU_MIN_ALLOC_SHIFT)
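
/*
 * Worked example: PCPU_MIN_ALLOC_SIZE is 1 << 2 = 4 bytes, so
 * allocations are served in 4-byte granules; a 3-byte request occupies
 * one 4-byte slot of unit space, for example.
 */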

/*
 * PCPU_BITMAP_BLOCK_SIZE must be the same size as PAGE_SIZE because the
 * updating of hints is used to manage nr_empty_pop_pages both within
 * each chunk and globally.
 */
#define PCPU_BITMAP_BLOCK_SIZE		PAGE_SIZE
#define PCPU_BITMAP_BLOCK_BITS		(PCPU_BITMAP_BLOCK_SIZE >>	\
					 PCPU_MIN_ALLOC_SHIFT)
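
/*
 * For example, with 4 KiB pages each bitmap block covers one page and
 * holds 4096 >> 2 = 1024 bits, one bit per PCPU_MIN_ALLOC_SIZE (4-byte)
 * slot.
 */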

/*
 * The percpu allocator can serve percpu allocations before slab is
 * initialized, which allows slab to depend on the percpu allocator.
 * The following parameter decides how much resource to preallocate
 * for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or larger than
 * PERCPU_DYNAMIC_EARLY_SIZE.
 */
#define PERCPU_DYNAMIC_EARLY_SIZE	(20 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if the arch is
 * manually allocating and mapping it for faster access (as a part of
 * a large page mapping, for example).
 *
 * The following values give between one and two pages of free space
 * after a typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32-bit x86.  A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(28 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#endif

extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
	int			nr_units;	/* aligned # of units */
	unsigned long		base_offset;	/* base address offset */
	unsigned int		*cpu_map;	/* unit->cpu map, empty
						 * entries contain NR_CPUS */
};

struct pcpu_alloc_info {
	size_t			static_size;
	size_t			reserved_size;
	size_t			dyn_size;
	size_t			unit_size;
	size_t			atom_size;
	size_t			alloc_size;
	size_t			__ai_size;	/* internal, don't use */
	int			nr_groups;	/* 0 if grouping unnecessary */
	struct pcpu_group_info	groups[];
};
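
/*
 * Sketch of how the sizes above relate (illustrative, not a normative
 * layout diagram): each per-cpu unit in the first chunk is laid out as
 *
 *	| static_size | reserved_size | dyn_size | ... padding ... |
 *	|<---------------------- unit_size ---------------------->|
 *
 * with unit_size covering at least static_size + reserved_size +
 * dyn_size.
 */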

enum pcpu_fc {
	PCPU_FC_AUTO,
	PCPU_FC_EMBED,
	PCPU_FC_PAGE,

	PCPU_FC_NR,
};
extern const char * const pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;
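
/*
 * Note: pcpu_chosen_fc is normally selected on the kernel command line
 * via the "percpu_alloc=" early parameter, e.g. percpu_alloc=page to
 * force the page first-chunk allocator.
 */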

typedef int (pcpu_fc_cpu_to_node_fn_t)(int cpu);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
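
/*
 * A minimal sketch of the callbacks an arch might pass in.  The helper
 * names are hypothetical; real implementations typically use an early
 * node lookup such as x86's early_cpu_to_node():
 *
 *	static int __init my_cpu_to_node(int cpu)
 *	{
 *		return early_cpu_to_node(cpu);
 *	}
 *
 *	static int __init my_cpu_distance(unsigned int from, unsigned int to)
 *	{
 *		return my_cpu_to_node(from) == my_cpu_to_node(to) ?
 *			LOCAL_DISTANCE : REMOTE_DISTANCE;
 *	}
 */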

extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
							     int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
					 void *base_addr);

extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
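
/*
 * For reference, a minimal embed-style setup resembling the generic
 * !CONFIG_HAVE_SETUP_PER_CPU_AREA fallback in mm/percpu.c (sketch only;
 * the real helper also records the per-cpu offsets afterwards):
 *
 *	void __init setup_per_cpu_areas(void)
 *	{
 *		int rc;
 *
 *		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *					    PERCPU_DYNAMIC_RESERVE,
 *					    PAGE_SIZE, NULL, NULL);
 *		if (rc < 0)
 *			panic("Failed to initialize percpu areas.");
 *	}
 */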

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
void __init pcpu_populate_pte(unsigned long addr);
extern int __init pcpu_page_first_chunk(size_t reserved_size,
				pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
#endif

extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __alloc_size(1);
extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
extern bool is_kernel_percpu_address(unsigned long addr);

#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
extern void __init setup_per_cpu_areas(void);
#endif

extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1);
extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1);
extern void free_percpu(void __percpu *__pdata);

DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T))
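
/*
 * The DEFINE_FREE() hook above enables scope-based cleanup from
 * <linux/cleanup.h>.  A hypothetical use (struct foo is illustrative):
 *
 *	struct foo __percpu *p __free(free_percpu) =
 *			alloc_percpu(struct foo);
 *	if (!p)
 *		return -ENOMEM;
 *	// p is freed automatically when it goes out of scope
 */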

extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

#define alloc_percpu_gfp(type, gfp)					\
	(typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type),	\
						__alignof__(type), gfp)
#define alloc_percpu(type)						\
	(typeof(type) __percpu *)__alloc_percpu(sizeof(type),		\
						__alignof__(type))
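
/*
 * Typical dynamic usage (illustrative sketch; struct counter and the
 * surrounding code are hypothetical):
 *
 *	struct counter { u64 hits; };
 *
 *	struct counter __percpu *c = alloc_percpu(struct counter);
 *	if (!c)
 *		return -ENOMEM;
 *
 *	this_cpu_inc(c->hits);		// fast path: current CPU's copy
 *
 *	u64 sum = 0;
 *	int cpu;
 *	for_each_possible_cpu(cpu)	// slow path: walk all copies
 *		sum += per_cpu_ptr(c, cpu)->hits;
 *
 *	free_percpu(c);
 */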

extern unsigned long pcpu_nr_pages(void);

#endif /* __LINUX_PERCPU_H */