xref: /f-stack/freebsd/mips/mips/mp_machdep.c (revision 22ce4aff)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2009 Neelkanth Natu
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/cpuset.h>
35 #include <sys/ktr.h>
36 #include <sys/proc.h>
37 #include <sys/lock.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
40 #include <sys/kernel.h>
41 #include <sys/pcpu.h>
42 #include <sys/smp.h>
43 #include <sys/sched.h>
44 #include <sys/bus.h>
45 
46 #include <vm/vm.h>
47 #include <vm/pmap.h>
48 #include <vm/vm_extern.h>
49 #include <vm/vm_kern.h>
50 
51 #include <machine/clock.h>
52 #include <machine/smp.h>
53 #include <machine/hwfunc.h>
54 #include <machine/intr_machdep.h>
55 #include <machine/cache.h>
56 #include <machine/tlb.h>
57 
/* Saved register context for each CPU parked by IPI_STOP. */
struct pcb stoppcbs[MAXCPU];

/* Dynamic per-CPU data area for the AP currently being started (see start_ap()). */
static void *dpcpu;
/* Serializes the final stage of AP bring-up in smp_init_secondary(). */
static struct mtx ap_boot_mtx;

/* Set (with release semantics) by release_aps() to let spinning APs proceed. */
static volatile int aps_ready;
/* Number of APs that have completed early init; polled by start_ap(). */
static volatile int mp_naps;
65 
/*
 * Post 'ipi' into the target CPU's pending-IPI bitmap and raise the
 * platform inter-processor interrupt so mips_ipi_handler() runs there.
 */
static void
ipi_send(struct pcpu *pc, int ipi)
{

	CTR3(KTR_SMP, "%s: cpu=%d, ipi=%x", __func__, pc->pc_cpuid, ipi);

	/*
	 * Record the request before kicking the interrupt so the handler
	 * is guaranteed to observe it in pc_pending_ipis.
	 */
	atomic_set_32(&pc->pc_pending_ipis, ipi);
	platform_ipi_send(pc->pc_cpuid);

	CTR1(KTR_SMP, "%s: sent", __func__);
}
77 
78 void
ipi_all_but_self(int ipi)79 ipi_all_but_self(int ipi)
80 {
81 	cpuset_t other_cpus;
82 
83 	other_cpus = all_cpus;
84 	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
85 	ipi_selected(other_cpus, ipi);
86 }
87 
88 /* Send an IPI to a set of cpus. */
89 void
ipi_selected(cpuset_t cpus,int ipi)90 ipi_selected(cpuset_t cpus, int ipi)
91 {
92 	struct pcpu *pc;
93 
94 	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
95 		if (CPU_ISSET(pc->pc_cpuid, &cpus)) {
96 			CTR3(KTR_SMP, "%s: pc: %p, ipi: %x\n", __func__, pc,
97 			    ipi);
98 			ipi_send(pc, ipi);
99 		}
100 	}
101 }
102 
/* Send an IPI to a specific CPU. */
void
ipi_cpu(int cpu, u_int ipi)
{

	/*
	 * NOTE(review): 'cpu' is used unchecked as an index into
	 * cpuid_to_pcpu[] — callers are presumably required to pass a
	 * valid, started CPU id; verify against call sites.
	 */
	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x\n", __func__, cpu, ipi);
	ipi_send(cpuid_to_pcpu[cpu], ipi);
}
111 
112 /*
113  * Handle an IPI sent to this processor.
114  */
115 static int
mips_ipi_handler(void * arg)116 mips_ipi_handler(void *arg)
117 {
118 	u_int	cpu, ipi, ipi_bitmap;
119 	int	bit;
120 
121 	cpu = PCPU_GET(cpuid);
122 
123 	platform_ipi_clear();	/* quiesce the pending ipi interrupt */
124 
125 	ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis));
126 	if (ipi_bitmap == 0)
127 		return (FILTER_STRAY);
128 
129 	CTR1(KTR_SMP, "smp_handle_ipi(), ipi_bitmap=%x", ipi_bitmap);
130 
131 	while ((bit = ffs(ipi_bitmap))) {
132 		bit = bit - 1;
133 		ipi = 1 << bit;
134 		ipi_bitmap &= ~ipi;
135 		switch (ipi) {
136 		case IPI_RENDEZVOUS:
137 			CTR0(KTR_SMP, "IPI_RENDEZVOUS");
138 			smp_rendezvous_action();
139 			break;
140 
141 		case IPI_AST:
142 			CTR0(KTR_SMP, "IPI_AST");
143 			break;
144 
145 		case IPI_STOP:
146 			/*
147 			 * IPI_STOP_HARD is mapped to IPI_STOP so it is not
148 			 * necessary to add it in the switch.
149 			 */
150 			CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");
151 
152 			savectx(&stoppcbs[cpu]);
153 			tlb_save();
154 
155 			/* Indicate we are stopped */
156 			CPU_SET_ATOMIC(cpu, &stopped_cpus);
157 
158 			/* Wait for restart */
159 			while (!CPU_ISSET(cpu, &started_cpus))
160 				cpu_spinwait();
161 
162 			CPU_CLR_ATOMIC(cpu, &started_cpus);
163 			CPU_CLR_ATOMIC(cpu, &stopped_cpus);
164 			CTR0(KTR_SMP, "IPI_STOP (restart)");
165 			break;
166 		case IPI_PREEMPT:
167 			CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
168 			sched_preempt(curthread);
169 			break;
170 		case IPI_HARDCLOCK:
171 			CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
172 			hardclockintr();
173 			break;
174 		default:
175 			panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu);
176 		}
177 	}
178 
179 	return (FILTER_HANDLED);
180 }
181 
182 static int
start_ap(int cpuid)183 start_ap(int cpuid)
184 {
185 	int cpus, ms;
186 
187 	cpus = mp_naps;
188 	dpcpu = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
189 
190 	mips_sync();
191 
192 	if (platform_start_ap(cpuid) != 0)
193 		return (-1);			/* could not start AP */
194 
195 	for (ms = 0; ms < 5000; ++ms) {
196 		if (mp_naps > cpus)
197 			return (0);		/* success */
198 		else
199 			DELAY(1000);
200 	}
201 
202 	return (-2);				/* timeout initializing AP */
203 }
204 
205 void
cpu_mp_setmaxid(void)206 cpu_mp_setmaxid(void)
207 {
208 	cpuset_t cpumask;
209 	int cpu, last;
210 
211 	platform_cpu_mask(&cpumask);
212 	mp_ncpus = 0;
213 	last = 1;
214 	while ((cpu = CPU_FFS(&cpumask)) != 0) {
215 		last = cpu;
216 		cpu--;
217 		CPU_CLR(cpu, &cpumask);
218 		mp_ncpus++;
219 	}
220 	if (mp_ncpus <= 0)
221 		mp_ncpus = 1;
222 
223 	mp_maxid = min(last, MAXCPU) - 1;
224 }
225 
/* MD hook to announce CPUs at boot; this platform prints nothing extra. */
void
cpu_mp_announce(void)
{
	/* NOTHING */
}
231 
/* Return the scheduler CPU topology; delegated entirely to the platform. */
struct cpu_group *
cpu_topo(void)
{
	return (platform_smp_topo());
}
237 
/* Report whether SMP bring-up should proceed (more than one CPU present). */
int
cpu_mp_probe(void)
{

	return (mp_ncpus > 1);
}
244 
245 void
cpu_mp_start(void)246 cpu_mp_start(void)
247 {
248 	int error, cpuid;
249 	cpuset_t cpumask;
250 
251 	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
252 
253 	CPU_ZERO(&all_cpus);
254 	platform_cpu_mask(&cpumask);
255 
256 	while (!CPU_EMPTY(&cpumask)) {
257 		cpuid = CPU_FFS(&cpumask) - 1;
258 		CPU_CLR(cpuid, &cpumask);
259 
260 		if (cpuid >= MAXCPU) {
261 			printf("cpu_mp_start: ignoring AP #%d.\n", cpuid);
262 			continue;
263 		}
264 
265 		if (cpuid != platform_processor_id()) {
266 			if ((error = start_ap(cpuid)) != 0) {
267 				printf("AP #%d failed to start: %d\n", cpuid, error);
268 				continue;
269 			}
270 			if (bootverbose)
271 				printf("AP #%d started!\n", cpuid);
272 		}
273 		CPU_SET(cpuid, &all_cpus);
274 	}
275 }
276 
/*
 * Per-AP initialization, entered from the platform's AP startup stub.
 * Brings the AP's TLB, caches and per-CPU data online, announces itself
 * to the BSP via mp_naps, waits for release, then enters the scheduler.
 * Never returns.
 */
void
smp_init_secondary(u_int32_t cpuid)
{

	/* TLB */
	mips_wr_wired(0);
	tlb_invalidate_all();
	mips_wr_wired(VMWIRED_ENTRIES);

	/*
	 * We assume that the L1 cache on the APs is identical to the one
	 * on the BSP.
	 */
	mips_dcache_wbinv_all();
	mips_icache_sync_all();

	mips_sync();

	mips_wr_entryhi(0);

	/* dpcpu was allocated for this AP by the BSP in start_ap(). */
	pcpu_init(PCPU_ADDR(cpuid), cpuid, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpuid);

	/* The AP has initialized successfully - allow the BSP to proceed */
	++mp_naps;

	/* Spin until the BSP is ready to release the APs */
	while (!aps_ready)
		;

#ifdef PLATFORM_INIT_SECONDARY
	platform_init_secondary(cpuid);
#endif

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d launched", PCPU_GET(cpuid));

	if (bootverbose)
		printf("SMP: AP CPU #%d launched.\n", PCPU_GET(cpuid));

	/* The last AP to arrive flags the system as fully started. */
	if (smp_cpus == mp_ncpus) {
		atomic_store_rel_int(&smp_started, 1);
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* Wait until every AP has checked in before scheduling anything. */
	while (smp_started == 0)
		; /* nothing */

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	/* enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}
342 
/*
 * SYSINIT hook run on the BSP: install the IPI interrupt handler and
 * then release the APs spinning on aps_ready in smp_init_secondary().
 * Does nothing on a uniprocessor system.
 */
static void
release_aps(void *dummy __unused)
{
	int ipi_irq;

	if (mp_ncpus == 1)
		return;

#ifdef PLATFORM_INIT_SECONDARY
	platform_init_secondary(0);
#endif

	/*
	 * IPI handler
	 */
	ipi_irq = platform_ipi_hardintr_num();
	if (ipi_irq != -1) {
		cpu_establish_hardintr("ipi", mips_ipi_handler, NULL, NULL,
		    ipi_irq, INTR_TYPE_MISC | INTR_EXCL, NULL);
	} else {
		/* Fall back to a software interrupt for IPI delivery. */
		ipi_irq = platform_ipi_softintr_num();
		cpu_establish_softintr("ipi", mips_ipi_handler, NULL, NULL,
		    ipi_irq, INTR_TYPE_MISC | INTR_EXCL, NULL);
	}

	/* Release the APs; must happen after the IPI handler is in place. */
	atomic_store_rel_int(&aps_ready, 1);

	/* Wait for all APs to reach the scheduler before returning. */
	while (smp_started == 0)
		; /* nothing */
}
373 
/* Run release_aps() at the start of the SI_SUB_SMP boot stage. */
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
375