xref: /f-stack/freebsd/arm/arm/cpufunc.c (revision 22ce4aff)
1 /*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-4-Clause
5  *
6  * arm9 support code Copyright (C) 2001 ARM Ltd
7  * Copyright (c) 1997 Mark Brinicombe.
8  * Copyright (c) 1997 Causality Limited
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by Causality Limited.
22  * 4. The name of Causality Limited may not be used to endorse or promote
23  *    products derived from this software without specific prior written
24  *    permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
27  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
28  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
29  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
30  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
31  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
32  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  * RiscBSD kernel project
39  *
40  * cpufuncs.c
41  *
42  * C functions for supporting CPU / MMU / TLB specific operations.
43  *
44  * Created      : 30/01/97
45  */
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD$");
48 
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/lock.h>
52 #include <sys/mutex.h>
53 #include <sys/bus.h>
54 #include <machine/bus.h>
55 #include <machine/cpu.h>
56 #include <machine/disassem.h>
57 
58 #include <vm/vm.h>
59 #include <vm/pmap.h>
60 #include <vm/uma.h>
61 
62 #include <machine/cpufunc.h>
63 
64 /* PRIMARY CACHE VARIABLES */
65 
66 int	arm_dcache_align;
67 int	arm_dcache_align_mask;
68 
69 #ifdef CPU_MV_PJ4B
70 static void pj4bv7_setup(void);
71 #endif
72 #if defined(CPU_ARM1176)
73 static void arm11x6_setup(void);
74 #endif
75 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
76 static void cortexa_setup(void);
77 #endif
78 
#ifdef CPU_MV_PJ4B
/*
 * CPU ops table for Marvell PJ4Bv7 cores.  All L2 cache hooks are stubbed
 * out with cpufunc_nullop; the (void *) casts adapt the no-argument nullop
 * to the range-taking slot signatures.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = (void *)cpufunc_nullop,	/* no low-power idle hook */

	/* Soft functions */
	.cf_setup = pj4bv7_setup
};
#endif /* CPU_MV_PJ4B */
95 
#if defined(CPU_ARM1176)
/*
 * CPU ops table for ARM1176 cores.  L2 cache hooks are stubbed with
 * cpufunc_nullop (no controllable L2 here); sleep is the ARM11 wait-for-
 * interrupt routine.
 */
struct cpu_functions arm1176_cpufuncs = {
	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = arm11x6_sleep,

	/* Soft functions */
	.cf_setup = arm11x6_setup
};
#endif /*CPU_ARM1176 */
112 
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * CPU ops table for Cortex-A and Qualcomm Krait cores.
 */
struct cpu_functions cortexa_cpufuncs = {
	/* Cache operations */

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	.cf_l2cache_wbinv_all = cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = armv7_cpu_sleep,

	/* Soft functions */
	.cf_setup = cortexa_setup
};
#endif /* CPU_CORTEXA || CPU_KRAIT */
134 
/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;	/* active ops table; copied in set_cpufuncs() */
u_int cputype;			/* MIDR masked with CPU_ID_CPU_MASK */

static void get_cachetype_cp15(void);
143 
/*
 * Probe the primary cache geometry via cp15 and record the D-cache line
 * size in arm_dcache_align / arm_dcache_align_mask.  Handles both the
 * ARMv7 CLIDR/CSSELR/CCSIDR scheme and the older Cache Type Register
 * layout.
 */
static void
get_cachetype_cp15(void)
{
	u_int ctype, dsize, cpuid;
	u_int clevel, csize, i, sel;
	u_int multiplier;
	u_char type;

	ctype = cp15_ctr_get();		/* Cache Type Register */
	cpuid = cp15_midr_get();	/* Main ID Register */
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpuid)
		goto out;	/* no cache info; see NOTE at the "out" label */

	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
		/* Read CLIDR and walk cache levels 0..6 until type == none. */
		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		    : "=r" (clevel));
		i = 0;
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
				/* Select data/unified cache at this level
				 * (CSSELR), then read its size id (CCSIDR). */
				sel = i << 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				/* Line size is 2^(LineSize+4) bytes on v7. */
				arm_dcache_align = 1 <<
				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
				arm_dcache_align_mask = arm_dcache_align - 1;
			}
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
				/* Select the I-cache side.  NOTE(review): the
				 * CCSIDR value read into csize here is never
				 * used — looks like leftover from code that
				 * also recorded I-cache geometry. */
				sel = (i << 1) | 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
			}
			i++;
			clevel >>= 3;	/* next 3-bit Ctype field */
		}
	} else {
		/*
		 * If you want to know how this code works, go read the ARM ARM.
		 */

		dsize = CPU_CT_DSIZE(ctype);
		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_dcache_align = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
			if (dsize & CPU_CT_xSIZE_M)
				arm_dcache_align = 0; /* not present */
		}

	/* NOTE(review): this label sits inside the else block; the goto
	 * above jumps here with arm_dcache_align unchanged (0 at boot),
	 * making the mask ~0.  Legal C, but confirm that is intended. */
	out:
		arm_dcache_align_mask = arm_dcache_align - 1;
	}
}
207 
208 /*
209  * Cannot panic here as we may not have a console yet ...
210  */
211 
/*
 * Identify the CPU from its MIDR and install the matching ops table into
 * the global cpufuncs, then probe the cache geometry.  Which CPU families
 * are recognized depends on the kernel's CPU_* config options.  Panics if
 * the CPU is not supported by this kernel.  Returns 0 on success.
 */
int
set_cpufuncs(void)
{
	cputype = cp15_midr_get();
	cputype &= CPU_ID_CPU_MASK;

#if defined(CPU_ARM1176)
	if (cputype == CPU_ID_ARM1176JZS) {
		cpufuncs = arm1176_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	/* Match on the part-number scheme; all listed parts share one table. */
	switch(cputype & CPU_ID_SCHEME_MASK) {
	case CPU_ID_CORTEXA5:
	case CPU_ID_CORTEXA7:
	case CPU_ID_CORTEXA8:
	case CPU_ID_CORTEXA9:
	case CPU_ID_CORTEXA12:
	case CPU_ID_CORTEXA15:
	case CPU_ID_CORTEXA53:
	case CPU_ID_CORTEXA57:
	case CPU_ID_CORTEXA72:
	case CPU_ID_KRAIT300:
		cpufuncs = cortexa_cpufuncs;
		get_cachetype_cp15();
		goto out;
	default:
		break;
	}
#endif /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_MV_PJ4B */

	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	/* Unreachable; keeps the compiler happy about the return path. */
	return(ARCHITECTURE_NOT_PRESENT);
out:
	/* Let UMA align allocations to the D-cache line size. */
	uma_set_align(arm_dcache_align_mask);
	return (0);
}
264 
265 /*
266  * CPU Setup code
267  */
268 
269 
#if defined(CPU_ARM1176) \
 || defined(CPU_MV_PJ4B) \
 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Enable the performance-monitor cycle counter (CCNT) on the current core.
 */
static __inline void
cpu_scc_setup_ccnt(void)
{
/* This is how you give userland access to the CCNT and PMCn
 * registers.
 * BEWARE! This gives write access also, which may not be what
 * you want!
 */
#ifdef _PMC_USER_READ_WRITE_
	/* Set PMUSERENR[0] to allow userland access */
	cp15_pmuserenr_set(1);
#endif
#if defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	cp15_pmcr_set(5);
#else
	/* Set up the PMCCNTR register as a cyclecounter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT */
	cp15_pminten_clr(0xFFFFFFFF);
	cp15_pmcr_set(5);
	cp15_pmcnten_set(0x80000000);
#endif
}
#endif
299 
#if defined(CPU_ARM1176)
/*
 * Per-core setup for ARM1176: apply an ACTLR errata workaround where
 * required, then enable the cycle counter.
 */
static void
arm11x6_setup(void)
{
	uint32_t set_bits, keep_mask;
	uint32_t actlr, new_actlr;

	/* Default: leave the Auxiliary Control Register untouched. */
	set_bits = 0;
	keep_mask = ~0u;

	/*
	 * Enable an errata workaround on ARM1176JZS parts.
	 * NOTE(review): the original comment says "ARM1176JZSr0", but the
	 * masked MIDR compare matches every revision — confirm whether a
	 * revision check was intended.
	 */
	if ((cp15_midr_get() & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) {
		set_bits = ARM1176_AUXCTL_PHD;
		keep_mask = ~ARM1176_AUXCTL_PHD;
	}

	actlr = cp15_actlr_get();
	new_actlr = (actlr & keep_mask) | set_bits;
	/* Skip the cp15 write when nothing would change. */
	if (new_actlr != actlr)
		cp15_actlr_set(new_actlr);

	cpu_scc_setup_ccnt();
}
#endif  /* CPU_ARM1176 */
331 
#ifdef CPU_MV_PJ4B
/*
 * Per-core setup for Marvell PJ4Bv7: run the PJ4B-specific configuration
 * and enable the cycle counter.
 */
static void
pj4bv7_setup(void)
{

	pj4b_config();
	cpu_scc_setup_ccnt();
}
#endif /* CPU_MV_PJ4B */
341 
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Per-core setup for Cortex-A / Krait: only the cycle counter needs
 * enabling here.
 */
static void
cortexa_setup(void)
{

	cpu_scc_setup_ccnt();
}
#endif  /* CPU_CORTEXA || CPU_KRAIT */
350