/*	$OpenBSD: pio.h,v 1.2 1998/09/15 10:50:12 pefo Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-4-Clause
 *
 * Copyright (c) 2002-2004 Juli Mallett.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1995-1999 Per Fogelstrom.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Per Fogelstrom.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	JNPR: cpufunc.h,v 1.5 2007/08/09 11:23:32 katta
 * $FreeBSD$
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

#include <sys/types.h>
#include <machine/cpuregs.h>

/*
 * These functions are required by user-land atomic ops.
 */

static __inline void
mips_barrier(void)
{
#if defined(CPU_CNMIPS) || defined(CPU_RMI) || defined(CPU_NLM)
	__compiler_membar();
#else
	__asm __volatile (".set noreorder\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  ".set reorder\n\t"
			  : : : "memory");
#endif
}

static __inline void
mips_cp0_sync(void)
{
	__asm __volatile (__XSTRING(COP0_SYNC));
}

static __inline void
mips_wbflush(void)
{
#if defined(CPU_CNMIPS)
	__asm __volatile (".set noreorder\n\t"
			"syncw\n\t"
			".set reorder\n"
			: : : "memory");
#else
	__asm __volatile ("sync" : : : "memory");
	mips_barrier();
#endif
}

static __inline void
breakpoint(void)
{
	__asm __volatile ("break");
}
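
/*
 * Example (illustrative sketch; the device address is hypothetical):
 * mips_wbflush() is typically used after a store to a device register
 * to drain the CPU write buffer before execution continues.
 *
 *	volatile uint32_t *ctrl = (volatile uint32_t *)0xb8000000;
 *
 *	*ctrl = 0x1;		// kick the (hypothetical) device
 *	mips_wbflush();		// make sure the store has been posted
 */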

#ifdef _KERNEL
/*
 * XXX
 * It would be nice to add variants that read/write register_t, to avoid some
 * ABI checks.
 */
#if defined(__mips_n32) || defined(__mips_n64)
#define	MIPS_RW64_COP0(n,r)					\
static __inline uint64_t					\
mips_rd_ ## n (void)						\
{								\
	uint64_t v0;						\
	__asm __volatile ("dmfc0 %[v0], $"__XSTRING(r)";"	\
			  : [v0] "=&r"(v0));			\
	mips_barrier();						\
	return (v0);						\
}								\
static __inline void						\
mips_wr_ ## n (uint64_t a0)					\
{								\
	__asm __volatile ("dmtc0 %[a0], $"__XSTRING(r)";"	\
			 __XSTRING(COP0_SYNC)";"		\
			 "nop;"					\
			 "nop;"					\
			 :					\
			 : [a0] "r"(a0));			\
	mips_barrier();						\
} struct __hack

#define	MIPS_RW64_COP0_SEL(n,r,s)				\
static __inline uint64_t					\
mips_rd_ ## n(void)						\
{								\
	uint64_t v0;						\
	__asm __volatile ("dmfc0 %[v0], $"__XSTRING(r)", "__XSTRING(s)";"	\
			  : [v0] "=&r"(v0));			\
	mips_barrier();						\
	return (v0);						\
}								\
static __inline void						\
mips_wr_ ## n(uint64_t a0)					\
{								\
	__asm __volatile ("dmtc0 %[a0], $"__XSTRING(r)", "__XSTRING(s)";"	\
			 __XSTRING(COP0_SYNC)";"		\
			 :					\
			 : [a0] "r"(a0));			\
	mips_barrier();						\
} struct __hack

#if defined(__mips_n64)
MIPS_RW64_COP0(excpc, MIPS_COP_0_EXC_PC);
MIPS_RW64_COP0(entryhi, MIPS_COP_0_TLB_HI);
MIPS_RW64_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK);
MIPS_RW64_COP0_SEL(userlocal, MIPS_COP_0_USERLOCAL, 2);
#ifdef CPU_CNMIPS
MIPS_RW64_COP0_SEL(cvmcount, MIPS_COP_0_COUNT, 6);
MIPS_RW64_COP0_SEL(cvmctl, MIPS_COP_0_COUNT, 7);
MIPS_RW64_COP0_SEL(cvmmemctl, MIPS_COP_0_COMPARE, 7);
MIPS_RW64_COP0_SEL(icache_err, MIPS_COP_0_CACHE_ERR, 0);
MIPS_RW64_COP0_SEL(dcache_err, MIPS_COP_0_CACHE_ERR, 1);
#endif
#endif
#if defined(__mips_n64) || defined(__mips_n32) /* PHYSADDR_64_BIT */
MIPS_RW64_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
MIPS_RW64_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
#endif
MIPS_RW64_COP0(xcontext, MIPS_COP_0_TLB_XCONTEXT);

#undef	MIPS_RW64_COP0
#undef	MIPS_RW64_COP0_SEL
#endif

#define	MIPS_RW32_COP0(n,r)					\
static __inline uint32_t					\
mips_rd_ ## n (void)						\
{								\
	uint32_t v0;						\
	__asm __volatile ("mfc0 %[v0], $"__XSTRING(r)";"	\
			  : [v0] "=&r"(v0));			\
	mips_barrier();						\
	return (v0);						\
}								\
static __inline void						\
mips_wr_ ## n (uint32_t a0)					\
{								\
	__asm __volatile ("mtc0 %[a0], $"__XSTRING(r)";"	\
			 __XSTRING(COP0_SYNC)";"		\
			 "nop;"					\
			 "nop;"					\
			 :					\
			 : [a0] "r"(a0));			\
	mips_barrier();						\
} struct __hack

#define	MIPS_RW32_COP0_SEL(n,r,s)				\
static __inline uint32_t					\
mips_rd_ ## n(void)						\
{								\
	uint32_t v0;						\
	__asm __volatile ("mfc0 %[v0], $"__XSTRING(r)", "__XSTRING(s)";"	\
			  : [v0] "=&r"(v0));			\
	mips_barrier();						\
	return (v0);						\
}								\
static __inline void						\
mips_wr_ ## n(uint32_t a0)					\
{								\
	__asm __volatile ("mtc0 %[a0], $"__XSTRING(r)", "__XSTRING(s)";"	\
			 __XSTRING(COP0_SYNC)";"		\
			 :					\
			 : [a0] "r"(a0));			\
	mips_barrier();						\
} struct __hack
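
/*
 * Each MIPS_RW32_COP0()/MIPS_RW32_COP0_SEL() invocation below expands to a
 * pair of accessors for one coprocessor 0 register.  For example (sketch of
 * the expansion), MIPS_RW32_COP0(status, MIPS_COP_0_STATUS) provides:
 *
 *	uint32_t mips_rd_status(void);		// mfc0 from the CP0 status register
 *	void	 mips_wr_status(uint32_t);	// mtc0 to the CP0 status register
 *
 * so callers such as intr_disable() below can read-modify-write CP0 state
 * without writing inline assembly themselves.
 */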

#ifdef CPU_CNMIPS
static __inline void mips_sync_icache (void)
{
	__asm __volatile (
		".set push\n"
		".set mips64\n"
		".word 0x041f0000\n"		/* xxx ICACHE */
		"nop\n"
		".set pop\n"
		: : );
}
#endif

MIPS_RW32_COP0(compare, MIPS_COP_0_COMPARE);
MIPS_RW32_COP0(config, MIPS_COP_0_CONFIG);
MIPS_RW32_COP0_SEL(config1, MIPS_COP_0_CONFIG, 1);
MIPS_RW32_COP0_SEL(config2, MIPS_COP_0_CONFIG, 2);
MIPS_RW32_COP0_SEL(config3, MIPS_COP_0_CONFIG, 3);
#ifdef CPU_CNMIPS
MIPS_RW32_COP0_SEL(config4, MIPS_COP_0_CONFIG, 4);
#endif
#ifdef BERI_LARGE_TLB
MIPS_RW32_COP0_SEL(config5, MIPS_COP_0_CONFIG, 5);
#endif
#if defined(CPU_NLM) || defined(BERI_LARGE_TLB)
MIPS_RW32_COP0_SEL(config6, MIPS_COP_0_CONFIG, 6);
#endif
#if defined(CPU_NLM) || defined(CPU_MIPS1004K) || defined (CPU_MIPS74K) || \
    defined(CPU_MIPS24K)
MIPS_RW32_COP0_SEL(config7, MIPS_COP_0_CONFIG, 7);
#endif
MIPS_RW32_COP0(count, MIPS_COP_0_COUNT);
MIPS_RW32_COP0(index, MIPS_COP_0_TLB_INDEX);
MIPS_RW32_COP0(wired, MIPS_COP_0_TLB_WIRED);
MIPS_RW32_COP0(cause, MIPS_COP_0_CAUSE);
#if !defined(__mips_n64)
MIPS_RW32_COP0(excpc, MIPS_COP_0_EXC_PC);
#endif
MIPS_RW32_COP0(status, MIPS_COP_0_STATUS);
MIPS_RW32_COP0_SEL(cmgcrbase, 15, 3);

/* XXX: Some of these registers are specific to MIPS32. */
#if !defined(__mips_n64)
MIPS_RW32_COP0(entryhi, MIPS_COP_0_TLB_HI);
MIPS_RW32_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK);
MIPS_RW32_COP0_SEL(userlocal, MIPS_COP_0_USERLOCAL, 2);
#endif
#ifdef CPU_NLM
MIPS_RW32_COP0_SEL(pagegrain, MIPS_COP_0_TLB_PG_MASK, 1);
#endif
#if !defined(__mips_n64) && !defined(__mips_n32) /* !PHYSADDR_64_BIT */
MIPS_RW32_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
MIPS_RW32_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
#endif
MIPS_RW32_COP0(prid, MIPS_COP_0_PRID);
MIPS_RW32_COP0_SEL(cinfo, MIPS_COP_0_PRID, 6);
MIPS_RW32_COP0_SEL(tinfo, MIPS_COP_0_PRID, 7);
/* XXX 64-bit?  */
MIPS_RW32_COP0_SEL(ebase, MIPS_COP_0_PRID, 1);

#if defined(CPU_MIPS24K) || defined(CPU_MIPS34K) ||		\
    defined(CPU_MIPS74K) || defined(CPU_MIPS1004K)  ||	\
    defined(CPU_MIPS1074K) || defined(CPU_INTERAPTIV) ||	\
    defined(CPU_PROAPTIV)
/* MIPS32/64 r2 intctl */
MIPS_RW32_COP0_SEL(intctl, MIPS_COP_0_INTCTL, 1);
#endif

#ifdef CPU_XBURST
MIPS_RW32_COP0_SEL(xburst_mbox0, MIPS_COP_0_XBURST_MBOX, 0);
MIPS_RW32_COP0_SEL(xburst_mbox1, MIPS_COP_0_XBURST_MBOX, 1);
MIPS_RW32_COP0_SEL(xburst_core_ctl, MIPS_COP_0_XBURST_C12, 2);
MIPS_RW32_COP0_SEL(xburst_core_sts, MIPS_COP_0_XBURST_C12, 3);
MIPS_RW32_COP0_SEL(xburst_reim, MIPS_COP_0_XBURST_C12, 4);
#endif
MIPS_RW32_COP0(watchlo, MIPS_COP_0_WATCH_LO);
MIPS_RW32_COP0_SEL(watchlo1, MIPS_COP_0_WATCH_LO, 1);
MIPS_RW32_COP0_SEL(watchlo2, MIPS_COP_0_WATCH_LO, 2);
MIPS_RW32_COP0_SEL(watchlo3, MIPS_COP_0_WATCH_LO, 3);
MIPS_RW32_COP0(watchhi, MIPS_COP_0_WATCH_HI);
MIPS_RW32_COP0_SEL(watchhi1, MIPS_COP_0_WATCH_HI, 1);
MIPS_RW32_COP0_SEL(watchhi2, MIPS_COP_0_WATCH_HI, 2);
MIPS_RW32_COP0_SEL(watchhi3, MIPS_COP_0_WATCH_HI, 3);

MIPS_RW32_COP0_SEL(perfcnt0, MIPS_COP_0_PERFCNT, 0);
MIPS_RW32_COP0_SEL(perfcnt1, MIPS_COP_0_PERFCNT, 1);
MIPS_RW32_COP0_SEL(perfcnt2, MIPS_COP_0_PERFCNT, 2);
MIPS_RW32_COP0_SEL(perfcnt3, MIPS_COP_0_PERFCNT, 3);
MIPS_RW32_COP0(hwrena, MIPS_COP_0_HWRENA);

#undef	MIPS_RW32_COP0
#undef	MIPS_RW32_COP0_SEL

static __inline register_t
intr_disable(void)
{
	register_t s;

	s = mips_rd_status();
	mips_wr_status(s & ~MIPS_SR_INT_IE);

	return (s & MIPS_SR_INT_IE);
}

static __inline register_t
intr_enable(void)
{
	register_t s;

	s = mips_rd_status();
	mips_wr_status(s | MIPS_SR_INT_IE);

	return (s);
}

static __inline void
intr_restore(register_t ie)
{
	if (ie == MIPS_SR_INT_IE) {
		intr_enable();
	}
}
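
/*
 * Example (illustrative sketch): the usual pattern is to save the previous
 * interrupt-enable state across a critical section and restore it
 * afterwards, rather than unconditionally re-enabling interrupts:
 *
 *	register_t s;
 *
 *	s = intr_disable();
 *	// ... code that must not be interrupted ...
 *	intr_restore(s);	// re-enables only if interrupts were enabled before
 */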

static __inline uint32_t
set_intr_mask(uint32_t mask)
{
	uint32_t ostatus;

	ostatus = mips_rd_status();
	mask = (ostatus & ~MIPS_SR_INT_MASK) | (mask & MIPS_SR_INT_MASK);
	mips_wr_status(mask);
	return (ostatus);
}

static __inline uint32_t
get_intr_mask(void)
{

	return (mips_rd_status() & MIPS_SR_INT_MASK);
}

#endif /* _KERNEL */

#define	readb(va)	(*(volatile uint8_t *) (va))
#define	readw(va)	(*(volatile uint16_t *) (va))
#define	readl(va)	(*(volatile uint32_t *) (va))
#if !defined(__mips_o32)
#define	readq(a)	(*(volatile uint64_t *)(a))
#endif

#define	writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define	writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define	writel(va, d)	(*(volatile uint32_t *) (va) = (d))
#if !defined(__mips_o32)
#define	writeq(va, d)	(*(volatile uint64_t *) (va) = (d))
#endif
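
/*
 * Example (illustrative sketch; the address is hypothetical): these macros
 * perform plain volatile accesses and are typically handed an uncached
 * (e.g. KSEG1) virtual address of a memory-mapped device register:
 *
 *	uint32_t v;
 *
 *	v = readl(0xb8000010);		// read a device register
 *	writel(0xb8000010, v | 0x1);	// set a bit and write it back
 */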

#endif /* !_MACHINE_CPUFUNC_H_ */