xref: /linux-6.15/arch/arm64/kernel/pi/map_kernel.c (revision 84b04d3e)
// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2023 Google LLC
// Author: Ard Biesheuvel <[email protected]>

#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/string.h>

#include <asm/memory.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "pi.h"

/* .eh_frame unwind data, consumed by scs_patch() below */
extern const u8 __eh_frame_start[], __eh_frame_end[];

extern void idmap_cpu_replace_ttbr1(void *pgdir);

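/*
 * Map a segment of the kernel image: [start, end) goes in at va_offset
 * from its physical placement, with the given permissions. *pgd hands
 * out page-table pages to map_range(); masking off the PAGE_OFFSET bits
 * strips the sign-extension bits of the TTBR1 VA, leaving only the bits
 * that the table walk actually indexes.
 */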
static void __init map_segment(pgd_t *pg_dir, u64 *pgd, u64 va_offset,
			       void *start, void *end, pgprot_t prot,
			       bool may_use_cont, int root_level)
{
	map_range(pgd, ((u64)start + va_offset) & ~PAGE_OFFSET,
		  ((u64)end + va_offset) & ~PAGE_OFFSET, (u64)start,
		  prot, root_level, (pte_t *)pg_dir, may_use_cont, 0);
}

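/*
 * Tear a segment mapping down by rewriting it with an empty pgprot:
 * PTE_VALID is never set, so the descriptors become invalid. No pgd is
 * passed since no new page-table pages can be needed.
 */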
static void __init unmap_segment(pgd_t *pg_dir, u64 va_offset, void *start,
				 void *end, int root_level)
{
	map_segment(pg_dir, NULL, va_offset, start, end, __pgprot(0),
		    false, root_level);
}

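/*
 * Populate init_pg_dir with a mapping of the kernel image at the given
 * KASLR displacement and make it live. If relocation processing or
 * shadow call stack patching still needs to write to the text, it is
 * mapped writable first and remapped with its final permissions once
 * the patching is done.
 */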
static void __init map_kernel(u64 kaslr_offset, u64 va_offset, int root_level)
{
	bool enable_scs = IS_ENABLED(CONFIG_UNWIND_PATCH_PAC_INTO_SCS);
	bool twopass = IS_ENABLED(CONFIG_RELOCATABLE);
	u64 pgdp = (u64)init_pg_dir + PAGE_SIZE;	/* page-table pages are handed out from here */
	pgprot_t text_prot = PAGE_KERNEL_ROX;
	pgprot_t data_prot = PAGE_KERNEL;
	pgprot_t prot;

	/*
	 * External debuggers may need to write directly to the text mapping to
	 * install SW breakpoints. Allow this (only) when explicitly requested
	 * with rodata=off.
	 */
	if (arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF))
		text_prot = PAGE_KERNEL_EXEC;

	/*
	 * We only enable the shadow call stack dynamically if we are running
	 * on a system that does not implement PAC or BTI. PAC and SCS provide
	 * roughly the same level of protection, and BTI relies on the PACIASP
	 * instructions serving as landing pads, preventing us from patching
	 * those instructions into something else.
	 */
	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) && cpu_has_pac())
		enable_scs = false;

	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && cpu_has_bti()) {
		enable_scs = false;

		/*
		 * If we have a CPU that supports BTI and a kernel built for
		 * BTI, then mark the kernel executable text as guarded pages
		 * now so we don't have to rewrite the page tables later.
		 */
		text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);
	}

	/* Map all code read-write on the first pass if needed */
	twopass |= enable_scs;
	prot = twopass ? data_prot : text_prot;

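	/*
	 * Map the image segments in order: text, rodata up to the start
	 * of inittext, inittext, initdata, and the remaining (writable)
	 * data up to _end.
	 */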
	map_segment(init_pg_dir, &pgdp, va_offset, _stext, _etext, prot,
		    !twopass, root_level);
	map_segment(init_pg_dir, &pgdp, va_offset, __start_rodata,
		    __inittext_begin, data_prot, false, root_level);
	map_segment(init_pg_dir, &pgdp, va_offset, __inittext_begin,
		    __inittext_end, prot, false, root_level);
	map_segment(init_pg_dir, &pgdp, va_offset, __initdata_begin,
		    __initdata_end, data_prot, false, root_level);
	map_segment(init_pg_dir, &pgdp, va_offset, _data, _end, data_prot,
		    true, root_level);
	dsb(ishst);	/* make the new descriptors visible to the walker */

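	/*
	 * Point TTBR1 at the new tables; idmap_cpu_replace_ttbr1() runs
	 * from the ID map, so replacing the active translation is safe.
	 */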
	idmap_cpu_replace_ttbr1(init_pg_dir);

	if (twopass) {
		if (IS_ENABLED(CONFIG_RELOCATABLE))
			relocate_kernel(kaslr_offset);

		if (enable_scs) {
			/* Patch PACIASP/AUTIASP into shadow call stack pushes/pops */
			scs_patch(__eh_frame_start + va_offset,
				  __eh_frame_end - __eh_frame_start);
			asm("ic ialluis");	/* the I-cache is stale after patching */

			dynamic_scs_is_enabled = true;
		}

		/*
		 * Unmap the text region before remapping it, to avoid
		 * potential TLB conflicts when creating the contiguous
		 * descriptors.
		 */
		unmap_segment(init_pg_dir, va_offset, _stext, _etext,
			      root_level);
		dsb(ishst);	/* wait for the invalid descriptors to land */
		isb();
		__tlbi(vmalle1);	/* evict any stale text translations */
		isb();

		/*
		 * Remap these segments with their final permissions; no
		 * new page table allocations should be needed.
		 */
		map_segment(init_pg_dir, NULL, va_offset, _stext, _etext,
			    text_prot, true, root_level);
		map_segment(init_pg_dir, NULL, va_offset, __inittext_begin,
			    __inittext_end, text_prot, false, root_level);
		dsb(ishst);
	}
}

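/*
 * Install an ID mapping of the FDT into init_idmap_pg_dir so it can be
 * parsed before the kernel mapping exists; the page-table pages come
 * from a static __initdata buffer.
 */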
static void __init map_fdt(u64 fdt)
{
	static u8 ptes[INIT_IDMAP_FDT_SIZE] __initdata __aligned(PAGE_SIZE);
	u64 efdt = fdt + MAX_FDT_SIZE;
	u64 ptep = (u64)ptes;

	/*
	 * Map up to MAX_FDT_SIZE bytes, but avoid overlap with the kernel
	 * image: if the FDT sits below _text, clamp the mapping at _text.
	 */
	map_range(&ptep, fdt, (u64)_text > fdt ? min((u64)_text, efdt) : efdt,
		  fdt, PAGE_KERNEL, IDMAP_ROOT_LEVEL,
		  (pte_t *)init_idmap_pg_dir, false, 0);
	dsb(ishst);
}

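/*
 * C entry point of the early (position independent) boot code, called
 * with the MMU on and the initial ID map active: map the FDT, apply the
 * feature overrides and KASLR seed it carries, then build and activate
 * the kernel's virtual mapping.
 */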
asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
{
	static char const chosen_str[] __initconst = "/chosen";
	u64 va_base, pa_base = (u64)&_text;
	u64 kaslr_offset = pa_base % MIN_KIMG_ALIGN;
	int root_level = 4 - CONFIG_PGTABLE_LEVELS;
	int chosen;

	map_fdt((u64)fdt);

	/* Clear BSS and the initial page tables */
	memset(__bss_start, 0, (u64)init_pg_end - (u64)__bss_start);

	/* Parse the command line for CPU feature overrides */
	chosen = fdt_path_offset(fdt, chosen_str);
	init_feature_override(boot_status, fdt, chosen);

	/*
	 * The virtual KASLR displacement modulo 2 MiB is decided by the
	 * physical placement of the image, as otherwise we might not be able
	 * to create the early kernel mapping using 2 MiB block descriptors.
	 * So take the low bits of the KASLR offset from the physical address,
	 * and fill in the high bits from the seed.
	 */
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		u64 kaslr_seed = kaslr_early_init(fdt, chosen);

		if (kaslr_seed && kaslr_requires_kpti())
			arm64_use_ng_mappings = true;

		kaslr_offset |= kaslr_seed & ~(MIN_KIMG_ALIGN - 1);
	}
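	/*
	 * E.g. (with MIN_KIMG_ALIGN == 2 MiB): an image loaded at PA
	 * 0x40280000 fixes the low bits of the displacement at 0x80000,
	 * and a seed of 0xabc00000 supplies the 2 MiB aligned high bits,
	 * for a final kaslr_offset of 0xabc80000.
	 */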

	va_base = KIMAGE_VADDR + kaslr_offset;
	map_kernel(kaslr_offset, va_base - pa_base, root_level);
}
183