/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_CPUFUNC_H_
#define _MACHINE_CPUFUNC_H_

#ifdef _KERNEL

#include <sys/types.h>

#include <machine/psl.h>
#include <machine/spr.h>

struct thread;

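/*
 * breakpoint() enters the kernel debugger when KDB is configured; otherwise
 * it is an empty inline stub so callers need no #ifdefs.
 */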
#ifdef KDB
void breakpoint(void);
#else
static __inline void
breakpoint(void)
{

	return;
}
#endif

/* CPU register mangling inlines */

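/* Write the MSR; the trailing isync makes the update context-synchronizing. */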
static __inline void
mtmsr(register_t value)
{

	__asm __volatile ("mtmsr %0; isync" :: "r"(value));
}

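/* 64-bit form of the MSR write. */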
#ifdef __powerpc64__
static __inline void
mtmsrd(register_t value)
{

	__asm __volatile ("mtmsrd %0; isync" :: "r"(value));
}
#endif

static __inline register_t
mfmsr(void)
{
	register_t value;

	__asm __volatile ("mfmsr %0" : "=r"(value));

	return (value);
}

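/* 32-bit (OEA) segment register access, indexed by effective address. */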
#ifndef __powerpc64__
static __inline void
mtsrin(vm_offset_t va, register_t value)
{

	__asm __volatile ("mtsrin %0,%1; isync" :: "r"(value), "r"(va));
}

static __inline register_t
mfsrin(vm_offset_t va)
{
	register_t value;

	__asm __volatile ("mfsrin %0,%1" : "=r"(value) : "r"(va));

	return (value);
}
#endif

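/* Read the CTRL register; SPR 136 is its read port. */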
static __inline register_t
mfctrl(void)
{
	register_t value;

	__asm __volatile ("mfspr %0,136" : "=r"(value));

	return (value);
}

static __inline void
mtdec(register_t value)
{

	__asm __volatile ("mtdec %0" :: "r"(value));
}

static __inline register_t
mfdec(void)
{
	register_t value;

	__asm __volatile ("mfdec %0" : "=r"(value));

	return (value);
}

static __inline register_t
mfpvr(void)
{
	register_t value;

	__asm __volatile ("mfpvr %0" : "=r"(value));

	return (value);
}

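/*
 * Read the 64-bit timebase.  On 32-bit CPUs it must be read as two halves;
 * TBU is re-read to detect a carry out of TBL between the two reads.
 */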
static __inline u_quad_t
mftb(void)
{
	u_quad_t tb;
#ifdef __powerpc64__
	__asm __volatile ("mftb %0" : "=r"(tb));
#else
	uint32_t *tbup = (uint32_t *)&tb;
	uint32_t *tblp = tbup + 1;

	do {
		*tbup = mfspr(TBR_TBU);
		*tblp = mfspr(TBR_TBL);
	} while (*tbup != mfspr(TBR_TBU));
#endif

	return (tb);
}

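/*
 * Write the timebase.  TBL is cleared first so that it cannot carry into
 * TBU between the two halves of the update.
 */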
static __inline void
mttb(u_quad_t time)
{

	mtspr(TBR_TBWL, 0);
	mtspr(TBR_TBWU, (uint32_t)(time >> 32));
	mtspr(TBR_TBWL, (uint32_t)(time & 0xffffffff));
}

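/* Memory and instruction synchronization barriers. */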
static __inline void
eieio(void)
{

	__asm __volatile ("eieio" : : : "memory");
}

static __inline void
isync(void)
{

	__asm __volatile ("isync" : : : "memory");
}

static __inline void
powerpc_sync(void)
{

	__asm __volatile ("sync" : : : "memory");
}

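/*
 * Disable external interrupts by clearing PSL_EE in the MSR; the previous
 * MSR value is returned for use with intr_restore().
 */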
static __inline register_t
intr_disable(void)
{
	register_t msr;

	msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	return (msr);
}

static __inline void
intr_restore(register_t msr)
{

	mtmsr(msr);
}

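/* The per-CPU data pointer is kept in SPRG0. */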
static __inline struct pcpu *
get_pcpu(void)
{
	struct pcpu *ret;

	__asm __volatile("mfsprg %0, 0" : "=r"(ret));

	return (ret);
}

#endif /* _KERNEL */

#endif /* !_MACHINE_CPUFUNC_H_ */