xref: /f-stack/freebsd/arm64/arm64/vm_machdep.c (revision 22ce4aff)
/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/signal.h>
#include <sys/sysent.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/frame.h>

#ifdef VFP
#include <machine/vfp.h>
#endif

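/* Initial FPCR value for new threads; VFPCR_DN selects Default NaN behaviour. */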
uint32_t initial_fpcr = VFPCR_DN;

#include <dev/psci/psci.h>

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct pcb *pcb2;
	struct trapframe *tf;

	if ((flags & RFPROC) == 0)
		return;

	if (td1 == curthread) {
		/*
		 * Save tpidr_el0, tpidrro_el0 and the VFP state.  This
		 * normally happens in cpu_switch(), but if userland has
		 * changed them and then forks, that save may not have
		 * happened yet.
		 */
		td1->td_pcb->pcb_tpidr_el0 = READ_SPECIALREG(tpidr_el0);
		td1->td_pcb->pcb_tpidrro_el0 = READ_SPECIALREG(tpidrro_el0);
#ifdef VFP
		if ((td1->td_pcb->pcb_fpflags & PCB_FP_STARTED) != 0)
			vfp_save_state(td1, td1->td_pcb);
#endif
	}

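	/*
	 * The child's pcb lives at the top of its kernel stack, with the
	 * stack-aligned trapframe placed immediately below it; this matches
	 * the layout set up by cpu_thread_alloc() below.
	 */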
	pcb2 = (struct pcb *)(td2->td_kstack +
	    td2->td_kstack_pages * PAGE_SIZE) - 1;

	td2->td_pcb = pcb2;
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	tf = (struct trapframe *)STACKALIGN((struct trapframe *)pcb2 - 1);
	bcopy(td1->td_frame, tf, sizeof(*tf));
	tf->tf_x[0] = 0;
	tf->tf_x[1] = 0;
	tf->tf_spsr = td1->td_frame->tf_spsr & (PSR_M_32 | PSR_DAIF);

	td2->td_frame = tf;

	/* Arrange for the child to enter fork_return() via fork_trampoline() */
	td2->td_pcb->pcb_x[8] = (uintptr_t)fork_return;
	td2->td_pcb->pcb_x[9] = (uintptr_t)td2;
	td2->td_pcb->pcb_lr = (uintptr_t)fork_trampoline;
	td2->td_pcb->pcb_sp = (uintptr_t)td2->td_frame;
	td2->td_pcb->pcb_fpusaved = &td2->td_pcb->pcb_fpustate;
	td2->td_pcb->pcb_vfpcpu = UINT_MAX;
	td2->td_pcb->pcb_fpusaved->vfp_fpcr = initial_fpcr;

	/* Set up to release the spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_daif = td1->td_md.md_saved_daif & ~DAIF_I_MASKED;
}

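/*
 * Reboot the machine via the PSCI firmware interface.  If the call
 * returns, the reset failed: report it and park the CPU in a
 * wait-for-interrupt loop.
 */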
void
cpu_reset(void)
{

	psci_reset();

	printf("cpu_reset failed\n");
	while (1)
		__asm volatile("wfi" ::: "memory");
}

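/*
 * Nothing machine-dependent needs to be done when a thread is swapped
 * in or out on arm64, so these hooks are intentionally empty.
 */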
void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

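/*
 * Report the result of a system call back to userland: x0/x1 carry the
 * return values and the PSR carry bit flags an error.  ERESTART rewinds
 * the ELR by one instruction so the SVC is re-executed; EJUSTRETURN
 * leaves the frame untouched.
 */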
void
cpu_set_syscall_retval(struct thread *td, int error)
{
	struct trapframe *frame;

	frame = td->td_frame;

	switch (error) {
	case 0:
		frame->tf_x[0] = td->td_retval[0];
		frame->tf_x[1] = td->td_retval[1];
		frame->tf_spsr &= ~PSR_C;	/* carry bit */
		break;
	case ERESTART:
		frame->tf_elr -= 4;
		break;
	case EJUSTRETURN:
		break;
	default:
		frame->tf_spsr |= PSR_C;	/* carry bit */
		frame->tf_x[0] = error;
		break;
	}
}

/*
 * Initialize machine state, mostly the pcb and trap frame, for a new
 * thread that is about to return to userspace.  Put enough state in the
 * new thread's PCB to get it to go back to fork_return(), which finalizes
 * the thread state and handles the peculiarities of the first return to
 * userspace for the new thread.
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
	bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));

	td->td_pcb->pcb_x[8] = (uintptr_t)fork_return;
	td->td_pcb->pcb_x[9] = (uintptr_t)td;
	td->td_pcb->pcb_lr = (uintptr_t)fork_trampoline;
	td->td_pcb->pcb_sp = (uintptr_t)td->td_frame;
	td->td_pcb->pcb_fpflags &= ~(PCB_FP_STARTED | PCB_FP_KERN | PCB_FP_NOSAVE);
	td->td_pcb->pcb_fpusaved = &td->td_pcb->pcb_fpustate;
	td->td_pcb->pcb_vfpcpu = UINT_MAX;

	/* Set up to release the spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_daif = td0->td_md.md_saved_daif & ~DAIF_I_MASKED;
}

/*
 * Set the machine state for performing an upcall that starts
 * the entry function with the given argument.
 */
void
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
	stack_t *stack)
{
	struct trapframe *tf = td->td_frame;

	/* 32-bit processes use r13 for sp */
	if (td->td_frame->tf_spsr & PSR_M_32)
		tf->tf_x[13] = STACKALIGN((uintptr_t)stack->ss_sp + stack->ss_size);
	else
		tf->tf_sp = STACKALIGN((uintptr_t)stack->ss_sp + stack->ss_size);
	tf->tf_elr = (register_t)entry;
	tf->tf_x[0] = (register_t)arg;
}

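/*
 * Install a new user TLS pointer.  64-bit processes read it from
 * tpidr_el0; 32-bit (AArch32) processes use the read-only tpidrro_el0,
 * so both registers are kept in sync for compat processes.
 */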
int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
	struct pcb *pcb;

	if ((uintptr_t)tls_base >= VM_MAXUSER_ADDRESS)
		return (EINVAL);

	pcb = td->td_pcb;
	if (td->td_frame->tf_spsr & PSR_M_32) {
		/* 32-bit ARM stores the user TLS in tpidrro_el0 */
		pcb->pcb_tpidrro_el0 = (register_t)tls_base;
		pcb->pcb_tpidr_el0 = (register_t)tls_base;
		if (td == curthread) {
			WRITE_SPECIALREG(tpidrro_el0, tls_base);
			WRITE_SPECIALREG(tpidr_el0, tls_base);
		}
	} else {
		pcb->pcb_tpidr_el0 = (register_t)tls_base;
		if (td == curthread)
			WRITE_SPECIALREG(tpidr_el0, tls_base);
	}

	return (0);
}

void
cpu_thread_exit(struct thread *td)
{
}

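/*
 * Carve the pcb out of the top of the new thread's kernel stack and
 * place the initial trapframe, stack-aligned, immediately below it.
 */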
void
cpu_thread_alloc(struct thread *td)
{

	td->td_pcb = (struct pcb *)(td->td_kstack +
	    td->td_kstack_pages * PAGE_SIZE) - 1;
	td->td_frame = (struct trapframe *)STACKALIGN(
	    (struct trapframe *)td->td_pcb - 1);
}

void
cpu_thread_free(struct thread *td)
{
}

void
cpu_thread_clean(struct thread *td)
{
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
{

	td->td_pcb->pcb_x[8] = (uintptr_t)func;
	td->td_pcb->pcb_x[9] = (uintptr_t)arg;
}

void
cpu_exit(struct thread *td)
{
}

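/*
 * The existing vmspace can always be reused across exec on arm64; there
 * is no machine-dependent reason to allocate a fresh one.
 */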
bool
cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
{

	return (true);
}

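/*
 * No machine-dependent procctl(2) commands are implemented on arm64, so
 * any request that reaches this hook is rejected with EINVAL.
 */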
int
cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused,
    int com __unused, void *data __unused)
{

	return (EINVAL);
}

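/*
 * VM software interrupt handler: the only machine-dependent work on
 * arm64 is to run any pending deferred busdma processing.
 */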
void
swi_vm(void *v)
{

	if (busdma_swi_pending != 0)
		busdma_swi();
}
301