xref: /f-stack/freebsd/arm64/arm64/vfp.c (revision 22ce4aff)
1 /*-
2  * Copyright (c) 2015-2016 The FreeBSD Foundation
3  * All rights reserved.
4  *
5  * This software was developed by Andrew Turner under
6  * sponsorship from the FreeBSD Foundation.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #ifdef VFP
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/malloc.h>
38 #include <sys/pcpu.h>
39 #include <sys/proc.h>
40 
41 #include <machine/armreg.h>
42 #include <machine/md_var.h>
43 #include <machine/pcb.h>
44 #include <machine/vfp.h>
45 
/*
 * Sanity check we can store all the VFP registers: 32 Q registers of
 * 16 bytes each must fit in the pcb's save area.
 */
CTASSERT(sizeof(((struct pcb *)0)->pcb_fpustate.vfp_regs) == 16 * 32);

/* malloc(9) type used for fpu_kern_ctx allocations. */
static MALLOC_DEFINE(M_FPUKERN_CTX, "fpukern_ctx",
    "Kernel contexts for VFP state");
51 
/*
 * Context used by fpu_kern_enter()/fpu_kern_leave() to stash the
 * previously active saved-state pointer while kernel code uses the
 * FP/SIMD unit.
 */
struct fpu_kern_ctx {
	struct vfpstate	*prev;	/* pcb_fpusaved value to restore on leave */
#define	FPU_KERN_CTX_DUMMY	0x01	/* avoided save for the kern thread */
#define	FPU_KERN_CTX_INUSE	0x02
	uint32_t	 flags;
	struct vfpstate	 state;	/* storage the kernel section saves into */
};
59 
60 static void
vfp_enable(void)61 vfp_enable(void)
62 {
63 	uint32_t cpacr;
64 
65 	cpacr = READ_SPECIALREG(cpacr_el1);
66 	cpacr = (cpacr & ~CPACR_FPEN_MASK) | CPACR_FPEN_TRAP_NONE;
67 	WRITE_SPECIALREG(cpacr_el1, cpacr);
68 	isb();
69 }
70 
71 static void
vfp_disable(void)72 vfp_disable(void)
73 {
74 	uint32_t cpacr;
75 
76 	cpacr = READ_SPECIALREG(cpacr_el1);
77 	cpacr = (cpacr & ~CPACR_FPEN_MASK) | CPACR_FPEN_TRAP_ALL1;
78 	WRITE_SPECIALREG(cpacr_el1, cpacr);
79 	isb();
80 }
81 
82 /*
83  * Called when the thread is dying or when discarding the kernel VFP state.
84  * If the thread was the last to use the VFP unit mark it as unused to tell
85  * the kernel the fp state is unowned. Ensure the VFP unit is off so we get
86  * an exception on the next access.
87  */
void
vfp_discard(struct thread *td)
{

#ifdef INVARIANTS
	/* Callers must hold a critical section while discarding. */
	if (td != NULL)
		CRITICAL_ASSERT(td);
#endif
	/* Drop ownership if td's state is the one loaded in the unit. */
	if (PCPU_GET(fpcurthread) == td)
		PCPU_SET(fpcurthread, NULL);

	/* Trap the next FP access so fresh state is loaded lazily. */
	vfp_disable();
}
101 
/*
 * Save the full FP/SIMD register file (q0-q31) plus the FPCR/FPSR
 * control and status registers into *state. The VFP unit must be
 * enabled (not trapping) when this is called.
 */
static void
vfp_store(struct vfpstate *state)
{
	__int128_t *vfp_state;
	uint64_t fpcr, fpsr;

	/* Each stp stores a pair of 16-byte Q registers. */
	vfp_state = state->vfp_regs;
	__asm __volatile(
	    "mrs	%0, fpcr		\n"
	    "mrs	%1, fpsr		\n"
	    "stp	q0,  q1,  [%2, #16 *  0]\n"
	    "stp	q2,  q3,  [%2, #16 *  2]\n"
	    "stp	q4,  q5,  [%2, #16 *  4]\n"
	    "stp	q6,  q7,  [%2, #16 *  6]\n"
	    "stp	q8,  q9,  [%2, #16 *  8]\n"
	    "stp	q10, q11, [%2, #16 * 10]\n"
	    "stp	q12, q13, [%2, #16 * 12]\n"
	    "stp	q14, q15, [%2, #16 * 14]\n"
	    "stp	q16, q17, [%2, #16 * 16]\n"
	    "stp	q18, q19, [%2, #16 * 18]\n"
	    "stp	q20, q21, [%2, #16 * 20]\n"
	    "stp	q22, q23, [%2, #16 * 22]\n"
	    "stp	q24, q25, [%2, #16 * 24]\n"
	    "stp	q26, q27, [%2, #16 * 26]\n"
	    "stp	q28, q29, [%2, #16 * 28]\n"
	    "stp	q30, q31, [%2, #16 * 30]\n"
	    : "=&r"(fpcr), "=&r"(fpsr) : "r"(vfp_state));

	state->vfp_fpcr = fpcr;
	state->vfp_fpsr = fpsr;
}
133 
/*
 * Load the full FP/SIMD register file (q0-q31) and the FPCR/FPSR
 * registers from *state. The VFP unit must be enabled when this is
 * called.
 */
static void
vfp_restore(struct vfpstate *state)
{
	__int128_t *vfp_state;
	uint64_t fpcr, fpsr;

	vfp_state = state->vfp_regs;
	fpcr = state->vfp_fpcr;
	fpsr = state->vfp_fpsr;

	/* Each ldp loads a pair of 16-byte Q registers. */
	__asm __volatile(
	    "ldp	q0,  q1,  [%2, #16 *  0]\n"
	    "ldp	q2,  q3,  [%2, #16 *  2]\n"
	    "ldp	q4,  q5,  [%2, #16 *  4]\n"
	    "ldp	q6,  q7,  [%2, #16 *  6]\n"
	    "ldp	q8,  q9,  [%2, #16 *  8]\n"
	    "ldp	q10, q11, [%2, #16 * 10]\n"
	    "ldp	q12, q13, [%2, #16 * 12]\n"
	    "ldp	q14, q15, [%2, #16 * 14]\n"
	    "ldp	q16, q17, [%2, #16 * 16]\n"
	    "ldp	q18, q19, [%2, #16 * 18]\n"
	    "ldp	q20, q21, [%2, #16 * 20]\n"
	    "ldp	q22, q23, [%2, #16 * 22]\n"
	    "ldp	q24, q25, [%2, #16 * 24]\n"
	    "ldp	q26, q27, [%2, #16 * 26]\n"
	    "ldp	q28, q29, [%2, #16 * 28]\n"
	    "ldp	q30, q31, [%2, #16 * 30]\n"
	    "msr	fpcr, %0		\n"
	    "msr	fpsr, %1		\n"
	    : : "r"(fpcr), "r"(fpsr), "r"(vfp_state));
}
165 
/*
 * Save the live VFP state into pcb->pcb_fpusaved and disable the FP
 * unit so the next access traps. A NULL td means the current thread.
 * If the unit is already trapping there is no live state to save and
 * this does nothing beyond the pcb_fpusaved fixup.
 */
void
vfp_save_state(struct thread *td, struct pcb *pcb)
{
	uint32_t cpacr;

	KASSERT(pcb != NULL, ("NULL vfp pcb"));
	KASSERT(td == NULL || td->td_pcb == pcb, ("Invalid vfp pcb"));

	/*
	 * savectx() will be called on panic with dumppcb as an argument,
	 * dumppcb doesn't have pcb_fpusaved set, so set it to save
	 * the VFP registers.
	 */
	if (pcb->pcb_fpusaved == NULL)
		pcb->pcb_fpusaved = &pcb->pcb_fpustate;

	if (td == NULL)
		td = curthread;

	critical_enter();
	/*
	 * Only store the registers if the VFP is enabled,
	 * i.e. return if we are trapping on FP access.
	 */
	cpacr = READ_SPECIALREG(cpacr_el1);
	if ((cpacr & CPACR_FPEN_MASK) == CPACR_FPEN_TRAP_NONE) {
		KASSERT(PCPU_GET(fpcurthread) == td,
		    ("Storing an invalid VFP state"));

		vfp_store(pcb->pcb_fpusaved);
		/* Ensure the stores complete before the unit is disabled. */
		dsb(ish);
		vfp_disable();
	}
	critical_exit();
}
201 
/*
 * Enable the FP unit for the current thread and, unless this CPU
 * already holds the thread's live state, reload the saved registers.
 * Marks the thread as having started FP use (PCB_FP_STARTED).
 */
void
vfp_restore_state(void)
{
	struct pcb *curpcb;
	u_int cpu;

	critical_enter();

	cpu = PCPU_GET(cpuid);
	curpcb = curthread->td_pcb;
	curpcb->pcb_fpflags |= PCB_FP_STARTED;

	/* Enable before touching the FP registers in vfp_restore(). */
	vfp_enable();

	/*
	 * If the previous thread on this cpu to use the VFP was not the
	 * current thread, or the current thread last used it on a different
	 * cpu we need to restore the old state.
	 */
	if (PCPU_GET(fpcurthread) != curthread || cpu != curpcb->pcb_vfpcpu) {
		vfp_restore(curthread->td_pcb->pcb_fpusaved);
		PCPU_SET(fpcurthread, curthread);
		curpcb->pcb_vfpcpu = cpu;
	}

	critical_exit();
}
229 
230 void
vfp_init(void)231 vfp_init(void)
232 {
233 	uint64_t pfr;
234 
235 	/* Check if there is a vfp unit present */
236 	pfr = READ_SPECIALREG(id_aa64pfr0_el1);
237 	if ((pfr & ID_AA64PFR0_FP_MASK) == ID_AA64PFR0_FP_NONE)
238 		return;
239 
240 	/* Disable to be enabled when it's used */
241 	vfp_disable();
242 
243 	if (PCPU_GET(cpuid) == 0)
244 		thread0.td_pcb->pcb_fpusaved->vfp_fpcr = initial_fpcr;
245 }
246 
/* Run vfp_init() during CPU subsystem initialisation at boot. */
SYSINIT(vfp, SI_SUB_CPU, SI_ORDER_ANY, vfp_init, NULL);
248 
249 struct fpu_kern_ctx *
fpu_kern_alloc_ctx(u_int flags)250 fpu_kern_alloc_ctx(u_int flags)
251 {
252 	struct fpu_kern_ctx *res;
253 	size_t sz;
254 
255 	sz = sizeof(struct fpu_kern_ctx);
256 	res = malloc(sz, M_FPUKERN_CTX, ((flags & FPU_KERN_NOWAIT) ?
257 	    M_NOWAIT : M_WAITOK) | M_ZERO);
258 	return (res);
259 }
260 
/*
 * Release a context obtained from fpu_kern_alloc_ctx(). The context
 * must not be in use (i.e. fpu_kern_leave() was called).
 */
void
fpu_kern_free_ctx(struct fpu_kern_ctx *ctx)
{

	KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) == 0, ("free'ing inuse ctx"));
	/* XXXAndrew clear the memory ? */
	free(ctx, M_FPUKERN_CTX);
}
269 
/*
 * Begin a kernel FP/SIMD section for td. Three modes:
 *  - FPU_KERN_NOCTX: claim the unit directly inside a critical
 *    section that stays open until fpu_kern_leave(); no ctx needed.
 *  - FPU_KERN_KTHR on a registered kernel FP thread: skip the save
 *    entirely and mark ctx as a dummy.
 *  - Default: save the current state, then point pcb_fpusaved at
 *    ctx->state so the kernel's FP use is saved there instead.
 */
void
fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	KASSERT((flags & FPU_KERN_NOCTX) != 0 || ctx != NULL,
	    ("ctx is required when !FPU_KERN_NOCTX"));
	KASSERT(ctx == NULL || (ctx->flags & FPU_KERN_CTX_INUSE) == 0,
	    ("using inuse ctx"));
	KASSERT((pcb->pcb_fpflags & PCB_FP_NOSAVE) == 0,
	    ("recursive fpu_kern_enter while in PCB_FP_NOSAVE state"));

	if ((flags & FPU_KERN_NOCTX) != 0) {
		/* Critical section is exited in fpu_kern_leave(). */
		critical_enter();
		/* Flush our own live state before taking over the unit. */
		if (curthread == PCPU_GET(fpcurthread)) {
			vfp_save_state(curthread, pcb);
		}
		PCPU_SET(fpcurthread, NULL);

		vfp_enable();
		pcb->pcb_fpflags |= PCB_FP_KERN | PCB_FP_NOSAVE |
		    PCB_FP_STARTED;
		return;
	}

	if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) {
		ctx->flags = FPU_KERN_CTX_DUMMY | FPU_KERN_CTX_INUSE;
		return;
	}
	/*
	 * Check either we are already using the VFP in the kernel, or
	 * the saved state points to the default user space.
	 */
	KASSERT((pcb->pcb_fpflags & PCB_FP_KERN) != 0 ||
	    pcb->pcb_fpusaved == &pcb->pcb_fpustate,
	    ("Mangled pcb_fpusaved %x %p %p", pcb->pcb_fpflags, pcb->pcb_fpusaved, &pcb->pcb_fpustate));
	ctx->flags = FPU_KERN_CTX_INUSE;
	/* Save the outgoing state, then redirect future saves into ctx. */
	vfp_save_state(curthread, pcb);
	ctx->prev = pcb->pcb_fpusaved;
	pcb->pcb_fpusaved = &ctx->state;
	pcb->pcb_fpflags |= PCB_FP_KERN;
	pcb->pcb_fpflags &= ~PCB_FP_STARTED;

	return;
}
316 
/*
 * End a kernel FP/SIMD section started by fpu_kern_enter(). In the
 * NOCTX case this closes the critical section opened on enter. A
 * dummy ctx (kernel FP thread) saved nothing, so nothing is undone.
 * Otherwise the current FP contents are discarded and pcb_fpusaved
 * is restored from ctx->prev. Always returns 0.
 */
int
fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
{
	struct pcb *pcb;

	pcb = td->td_pcb;

	if ((pcb->pcb_fpflags & PCB_FP_NOSAVE) != 0) {
		KASSERT(ctx == NULL, ("non-null ctx after FPU_KERN_NOCTX"));
		KASSERT(PCPU_GET(fpcurthread) == NULL,
		    ("non-NULL fpcurthread for PCB_FP_NOSAVE"));
		CRITICAL_ASSERT(td);

		vfp_disable();
		pcb->pcb_fpflags &= ~(PCB_FP_NOSAVE | PCB_FP_STARTED);
		/* Matches the critical_enter() from fpu_kern_enter(). */
		critical_exit();
	} else {
		KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) != 0,
		    ("FPU context not inuse"));
		ctx->flags &= ~FPU_KERN_CTX_INUSE;

		/* Dummy contexts saved no state; keep PCB_FP_KERN set. */
		if (is_fpu_kern_thread(0) &&
		    (ctx->flags & FPU_KERN_CTX_DUMMY) != 0)
			return (0);
		KASSERT((ctx->flags & FPU_KERN_CTX_DUMMY) == 0, ("dummy ctx"));
		critical_enter();
		vfp_discard(td);
		critical_exit();
		pcb->pcb_fpflags &= ~PCB_FP_STARTED;
		pcb->pcb_fpusaved = ctx->prev;
	}

	/* Leaving the outermost kernel FP section clears PCB_FP_KERN. */
	if (pcb->pcb_fpusaved == &pcb->pcb_fpustate) {
		pcb->pcb_fpflags &= ~PCB_FP_KERN;
	} else {
		KASSERT((pcb->pcb_fpflags & PCB_FP_KERN) != 0,
		    ("unpaired fpu_kern_leave"));
	}

	return (0);
}
358 
359 int
fpu_kern_thread(u_int flags)360 fpu_kern_thread(u_int flags)
361 {
362 	struct pcb *pcb = curthread->td_pcb;
363 
364 	KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
365 	    ("Only kthread may use fpu_kern_thread"));
366 	KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
367 	    ("Mangled pcb_fpusaved"));
368 	KASSERT((pcb->pcb_fpflags & PCB_FP_KERN) == 0,
369 	    ("Thread already setup for the VFP"));
370 	pcb->pcb_fpflags |= PCB_FP_KERN;
371 	return (0);
372 }
373 
374 int
is_fpu_kern_thread(u_int flags)375 is_fpu_kern_thread(u_int flags)
376 {
377 	struct pcb *curpcb;
378 
379 	if ((curthread->td_pflags & TDP_KTHREAD) == 0)
380 		return (0);
381 	curpcb = curthread->td_pcb;
382 	return ((curpcb->pcb_fpflags & PCB_FP_KERN) != 0);
383 }
384 #endif
385