/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

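/* Generate a debug breakpoint exception via the AArch64 BRK instruction. */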
static __inline void
breakpoint(void)
{

	__asm("brk #0");
}

#ifdef _KERNEL

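/*
 * The bit-search helpers below map directly onto compiler builtins; the
 * HAVE_INLINE_* macros let machine-independent headers know that inline
 * versions are provided here.
 */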
#define	HAVE_INLINE_FFS

static __inline __pure2 int
ffs(int mask)
{

	return (__builtin_ffs(mask));
}

#define	HAVE_INLINE_FFSL

static __inline __pure2 int
ffsl(long mask)
{

	return (__builtin_ffsl(mask));
}

#define	HAVE_INLINE_FFSLL

static __inline __pure2 int
ffsll(long long mask)
{

	return (__builtin_ffsll(mask));
}

#define	HAVE_INLINE_FLS

static __inline __pure2 int
fls(int mask)
{

	return (mask == 0 ? 0 :
	    8 * sizeof(mask) - __builtin_clz((u_int)mask));
}

#define	HAVE_INLINE_FLSL

static __inline __pure2 int
flsl(long mask)
{

	return (mask == 0 ? 0 :
	    8 * sizeof(mask) - __builtin_clzl((u_long)mask));
}

#define	HAVE_INLINE_FLSLL

static __inline __pure2 int
flsll(long long mask)
{

	return (mask == 0 ? 0 :
	    8 * sizeof(mask) - __builtin_clzll((unsigned long long)mask));
}

#include <machine/armreg.h>

void pan_enable(void);

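/*
 * Debug exception masking: DAIFSet/DAIFClr #8 sets/clears the PSTATE.D
 * bit.  dbg_disable() returns the previous DAIF value.
 */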
static __inline register_t
dbg_disable(void)
{
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #8 \n"
	    : "=&r" (ret));

	return (ret);
}

static __inline void
dbg_enable(void)
{

	__asm __volatile("msr daifclr, #8");
}

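/*
 * Interrupt masking: DAIFSet/DAIFClr #2 sets/clears the PSTATE.I (IRQ
 * mask) bit.  intr_disable() returns the prior DAIF value, which
 * intr_restore() writes back in full.
 */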
static __inline register_t
intr_disable(void)
{
	/* DAIF is a 32-bit register */
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #2 \n"
	    : "=&r" (ret));

	return (ret);
}

static __inline void
intr_restore(register_t s)
{

	WRITE_SPECIALREG(daif, s);
}

static __inline void
intr_enable(void)
{

	__asm __volatile("msr daifclr, #2");
}

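/*
 * MIDR_EL1 identifies the CPU implementer, part number and revision;
 * MPIDR_EL1 carries the core's affinity fields, used to identify CPUs
 * in an SMP system.
 */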
static __inline register_t
get_midr(void)
{
	uint64_t midr;

	midr = READ_SPECIALREG(midr_el1);

	return (midr);
}

static __inline register_t
get_mpidr(void)
{
	uint64_t mpidr;

	mpidr = READ_SPECIALREG(mpidr_el1);

	return (mpidr);
}

static __inline void
clrex(void)
{

	/*
	 * The "memory" clobber serves as a compiler barrier; without it
	 * the monitor clear might occur too late for us.
	 */
	__asm __volatile("clrex" : : : "memory");
}

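/*
 * Install a new translation table base for the lower (user) VA range;
 * the ISB ensures subsequent instructions see the new TTBR0_EL1 value.
 */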
static __inline void
set_ttbr0(uint64_t ttbr0)
{

	__asm __volatile(
	    "msr ttbr0_el1, %0 \n"
	    "isb               \n"
	    :
	    : "r" (ttbr0));
}

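/*
 * "ic ialluis" invalidates all instruction caches to the Point of
 * Unification across the Inner Shareable domain (i.e. on all CPUs);
 * the DSB/ISB pair waits for completion and resynchronizes the
 * instruction stream.
 */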
static __inline void
invalidate_icache(void)
{

	__asm __volatile(
	    "ic ialluis \n"
	    "dsb ish    \n"
	    "isb        \n");
}

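/* As above, but "ic iallu"/"dsb nsh" affect only the executing CPU. */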
static __inline void
invalidate_local_icache(void)
{

	__asm __volatile(
	    "ic iallu \n"
	    "dsb nsh  \n"
	    "isb      \n");
}

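/*
 * Instruction-cache properties and cache line geometry, determined at
 * boot (see the cache identification registers CTR_EL0 and DCZID_EL0).
 */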
extern bool icache_aliasing;
extern bool icache_vmid;

extern int64_t dcache_line_size;
extern int64_t icache_line_size;
extern int64_t idcache_line_size;
extern int64_t dczva_line_size;

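/*
 * Map the machine-independent cpu_* cache/TLB maintenance hooks onto
 * the arm64 implementations declared below.
 */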
#define	cpu_nullop()			arm64_nullop()
#define	cpufunc_nullop()		arm64_nullop()

#define	cpu_tlb_flushID()		arm64_tlb_flushID()

#define	cpu_dcache_wbinv_range(a, s)	arm64_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s)	arm64_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s)	arm64_dcache_wb_range((a), (s))

extern void (*arm64_icache_sync_range)(vm_offset_t, vm_size_t);

#define	cpu_icache_sync_range(a, s)	arm64_icache_sync_range((a), (s))
#define	cpu_icache_sync_range_checked(a, s) arm64_icache_sync_range_checked((a), (s))

void arm64_nullop(void);
void arm64_tlb_flushID(void);
void arm64_dic_idc_icache_sync_range(vm_offset_t, vm_size_t);
void arm64_aliasing_icache_sync_range(vm_offset_t, vm_size_t);
int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t);
void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t);
void arm64_dcache_inv_range(vm_offset_t, vm_size_t);
void arm64_dcache_wb_range(vm_offset_t, vm_size_t);

#endif /* _KERNEL */
#endif /* _MACHINE_CPUFUNC_H_ */