xref: /linux-6.15/kernel/smp.c (revision 0e4a19e2)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
23d442233SJens Axboe /*
33d442233SJens Axboe  * Generic helpers for smp ipi calls
43d442233SJens Axboe  *
53d442233SJens Axboe  * (C) Jens Axboe <[email protected]> 2008
63d442233SJens Axboe  */
7ca7dfdbbSMichael Ellerman 
8ca7dfdbbSMichael Ellerman #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9ca7dfdbbSMichael Ellerman 
1047885016SFrederic Weisbecker #include <linux/irq_work.h>
113d442233SJens Axboe #include <linux/rcupdate.h>
1259190f42SLinus Torvalds #include <linux/rculist.h>
13641cd4cfSIngo Molnar #include <linux/kernel.h>
149984de1aSPaul Gortmaker #include <linux/export.h>
150b13fda1SIngo Molnar #include <linux/percpu.h>
160b13fda1SIngo Molnar #include <linux/init.h>
17f9d34595SSebastian Andrzej Siewior #include <linux/interrupt.h>
185a0e3ad6STejun Heo #include <linux/gfp.h>
193d442233SJens Axboe #include <linux/smp.h>
208969a5edSPeter Zijlstra #include <linux/cpu.h>
21c6f4459fSChuansheng Liu #include <linux/sched.h>
224c822698SIngo Molnar #include <linux/sched/idle.h>
2347ae4b05SJuergen Gross #include <linux/hypervisor.h>
2435feb604SPaul E. McKenney #include <linux/sched/clock.h>
2535feb604SPaul E. McKenney #include <linux/nmi.h>
2635feb604SPaul E. McKenney #include <linux/sched/debug.h>
278d0968ccSJuergen Gross #include <linux/jump_label.h>
28c4df1593SThorsten Blum #include <linux/string_choices.h>
293d442233SJens Axboe 
30cc9cb0a7SValentin Schneider #include <trace/events/ipi.h>
31949fa3f1SLeonardo Bras #define CREATE_TRACE_POINTS
32949fa3f1SLeonardo Bras #include <trace/events/csd.h>
33949fa3f1SLeonardo Bras #undef CREATE_TRACE_POINTS
34cc9cb0a7SValentin Schneider 
353bb5d2eeSSuresh Siddha #include "smpboot.h"
361f8db415SIngo Molnar #include "sched/smp.h"
373bb5d2eeSSuresh Siddha 
38545b8c8dSPeter Zijlstra #define CSD_TYPE(_csd)	((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)
393d442233SJens Axboe 
403d442233SJens Axboe struct call_function_data {
416366d062SPaul E. McKenney 	call_single_data_t	__percpu *csd;
428969a5edSPeter Zijlstra 	cpumask_var_t		cpumask;
433fc5b3b6SAaron Lu 	cpumask_var_t		cpumask_ipi;
443d442233SJens Axboe };
453d442233SJens Axboe 
46a22793c7SNadav Amit static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);
47e03bcb68SMilton Miller 
486897fc22SChristoph Hellwig static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
498969a5edSPeter Zijlstra 
500d3a00b3SImran Khan static DEFINE_PER_CPU(atomic_t, trigger_backtrace) = ATOMIC_INIT(1);
510d3a00b3SImran Khan 
5216bf5a5eSThomas Gleixner static void __flush_smp_call_function_queue(bool warn_cpu_offline);
538d056c48SSrivatsa S. Bhat 
smpcfd_prepare_cpu(unsigned int cpu)5431487f83SRichard Weinberger int smpcfd_prepare_cpu(unsigned int cpu)
558969a5edSPeter Zijlstra {
568969a5edSPeter Zijlstra 	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
578969a5edSPeter Zijlstra 
58eaa95840SYinghai Lu 	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
598969a5edSPeter Zijlstra 				     cpu_to_node(cpu)))
6031487f83SRichard Weinberger 		return -ENOMEM;
613fc5b3b6SAaron Lu 	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
623fc5b3b6SAaron Lu 				     cpu_to_node(cpu))) {
633fc5b3b6SAaron Lu 		free_cpumask_var(cfd->cpumask);
643fc5b3b6SAaron Lu 		return -ENOMEM;
653fc5b3b6SAaron Lu 	}
666366d062SPaul E. McKenney 	cfd->csd = alloc_percpu(call_single_data_t);
676366d062SPaul E. McKenney 	if (!cfd->csd) {
689a46ad6dSShaohua Li 		free_cpumask_var(cfd->cpumask);
693fc5b3b6SAaron Lu 		free_cpumask_var(cfd->cpumask_ipi);
7031487f83SRichard Weinberger 		return -ENOMEM;
719a46ad6dSShaohua Li 	}
728969a5edSPeter Zijlstra 
7331487f83SRichard Weinberger 	return 0;
7431487f83SRichard Weinberger }
758969a5edSPeter Zijlstra 
smpcfd_dead_cpu(unsigned int cpu)7631487f83SRichard Weinberger int smpcfd_dead_cpu(unsigned int cpu)
7731487f83SRichard Weinberger {
7831487f83SRichard Weinberger 	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
7931487f83SRichard Weinberger 
808969a5edSPeter Zijlstra 	free_cpumask_var(cfd->cpumask);
813fc5b3b6SAaron Lu 	free_cpumask_var(cfd->cpumask_ipi);
826366d062SPaul E. McKenney 	free_percpu(cfd->csd);
8331487f83SRichard Weinberger 	return 0;
8431487f83SRichard Weinberger }
858d056c48SSrivatsa S. Bhat 
smpcfd_dying_cpu(unsigned int cpu)8631487f83SRichard Weinberger int smpcfd_dying_cpu(unsigned int cpu)
8731487f83SRichard Weinberger {
888d056c48SSrivatsa S. Bhat 	/*
898d056c48SSrivatsa S. Bhat 	 * The IPIs for the smp-call-function callbacks queued by other
908d056c48SSrivatsa S. Bhat 	 * CPUs might arrive late, either due to hardware latencies or
918d056c48SSrivatsa S. Bhat 	 * because this CPU disabled interrupts (inside stop-machine)
928d056c48SSrivatsa S. Bhat 	 * before the IPIs were sent. So flush out any pending callbacks
938d056c48SSrivatsa S. Bhat 	 * explicitly (without waiting for the IPIs to arrive), to
948d056c48SSrivatsa S. Bhat 	 * ensure that the outgoing CPU doesn't go offline with work
958d056c48SSrivatsa S. Bhat 	 * still pending.
968d056c48SSrivatsa S. Bhat 	 */
9716bf5a5eSThomas Gleixner 	__flush_smp_call_function_queue(false);
98afaa653cSPeter Zijlstra 	irq_work_run();
9931487f83SRichard Weinberger 	return 0;
1008969a5edSPeter Zijlstra }
1018969a5edSPeter Zijlstra 
call_function_init(void)102d8ad7d11STakao Indoh void __init call_function_init(void)
1033d442233SJens Axboe {
1043d442233SJens Axboe 	int i;
1053d442233SJens Axboe 
1066897fc22SChristoph Hellwig 	for_each_possible_cpu(i)
1076897fc22SChristoph Hellwig 		init_llist_head(&per_cpu(call_single_queue, i));
1088969a5edSPeter Zijlstra 
10931487f83SRichard Weinberger 	smpcfd_prepare_cpu(smp_processor_id());
1103d442233SJens Axboe }
1113d442233SJens Axboe 
11208407b5fSValentin Schneider static __always_inline void
send_call_function_single_ipi(int cpu)1135c312497SPeter Zijlstra send_call_function_single_ipi(int cpu)
11408407b5fSValentin Schneider {
11568f4ff04SValentin Schneider 	if (call_function_single_prep_ipi(cpu)) {
1165c312497SPeter Zijlstra 		trace_ipi_send_cpu(cpu, _RET_IP_,
1175c312497SPeter Zijlstra 				   generic_smp_call_function_single_interrupt);
11868f4ff04SValentin Schneider 		arch_send_call_function_single_ipi(cpu);
11968f4ff04SValentin Schneider 	}
12068f4ff04SValentin Schneider }
12168f4ff04SValentin Schneider 
12268f4ff04SValentin Schneider static __always_inline void
send_call_function_ipi_mask(struct cpumask * mask)1235c312497SPeter Zijlstra send_call_function_ipi_mask(struct cpumask *mask)
12468f4ff04SValentin Schneider {
1255c312497SPeter Zijlstra 	trace_ipi_send_cpumask(mask, _RET_IP_,
1265c312497SPeter Zijlstra 			       generic_smp_call_function_single_interrupt);
12708407b5fSValentin Schneider 	arch_send_call_function_ipi_mask(mask);
12808407b5fSValentin Schneider }
12908407b5fSValentin Schneider 
130949fa3f1SLeonardo Bras static __always_inline void
csd_do_func(smp_call_func_t func,void * info,call_single_data_t * csd)131d090ec0dSLeonardo Bras csd_do_func(smp_call_func_t func, void *info, call_single_data_t *csd)
132949fa3f1SLeonardo Bras {
133949fa3f1SLeonardo Bras 	trace_csd_function_entry(func, csd);
134949fa3f1SLeonardo Bras 	func(info);
135949fa3f1SLeonardo Bras 	trace_csd_function_exit(func, csd);
136949fa3f1SLeonardo Bras }
137949fa3f1SLeonardo Bras 
13835feb604SPaul E. McKenney #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
13935feb604SPaul E. McKenney 
140c5219860SPaul E. McKenney static DEFINE_STATIC_KEY_MAYBE(CONFIG_CSD_LOCK_WAIT_DEBUG_DEFAULT, csdlock_debug_enabled);
1418d0968ccSJuergen Gross 
1421771257cSPaul E. McKenney /*
1431771257cSPaul E. McKenney  * Parse the csdlock_debug= kernel boot parameter.
1441771257cSPaul E. McKenney  *
1451771257cSPaul E. McKenney  * If you need to restore the old "ext" value that once provided
1461771257cSPaul E. McKenney  * additional debugging information, reapply the following commits:
1471771257cSPaul E. McKenney  *
1481771257cSPaul E. McKenney  * de7b09ef658d ("locking/csd_lock: Prepare more CSD lock debugging")
1491771257cSPaul E. McKenney  * a5aabace5fb8 ("locking/csd_lock: Add more data to CSD lock debugging")
1501771257cSPaul E. McKenney  */
csdlock_debug(char * str)1518d0968ccSJuergen Gross static int __init csdlock_debug(char *str)
1528d0968ccSJuergen Gross {
153203e4358SPaul E. McKenney 	int ret;
1548d0968ccSJuergen Gross 	unsigned int val = 0;
1558d0968ccSJuergen Gross 
156203e4358SPaul E. McKenney 	ret = get_option(&str, &val);
157203e4358SPaul E. McKenney 	if (ret) {
1588d0968ccSJuergen Gross 		if (val)
1598d0968ccSJuergen Gross 			static_branch_enable(&csdlock_debug_enabled);
160203e4358SPaul E. McKenney 		else
161203e4358SPaul E. McKenney 			static_branch_disable(&csdlock_debug_enabled);
162203e4358SPaul E. McKenney 	}
1638d0968ccSJuergen Gross 
1649c9b26b0SChen Zhongjin 	return 1;
1658d0968ccSJuergen Gross }
1669c9b26b0SChen Zhongjin __setup("csdlock_debug=", csdlock_debug);
1678d0968ccSJuergen Gross 
16835feb604SPaul E. McKenney static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
16935feb604SPaul E. McKenney static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
17035feb604SPaul E. McKenney static DEFINE_PER_CPU(void *, cur_csd_info);
17135feb604SPaul E. McKenney 
1723791a223SPaul E. McKenney static ulong csd_lock_timeout = 5000;  /* CSD lock timeout in milliseconds. */
173*0e4a19e2SRik van Riel module_param(csd_lock_timeout, ulong, 0644);
17494b3f0b5SRik van Riel static int panic_on_ipistall;  /* CSD panic timeout in milliseconds, 300000 for five minutes. */
175*0e4a19e2SRik van Riel module_param(panic_on_ipistall, int, 0644);
1763791a223SPaul E. McKenney 
1772b722160SWei Yongjun static atomic_t csd_bug_count = ATOMIC_INIT(0);
17835feb604SPaul E. McKenney 
17935feb604SPaul E. McKenney /* Record current CSD work for current CPU, NULL to erase. */
__csd_lock_record(call_single_data_t * csd)180d090ec0dSLeonardo Bras static void __csd_lock_record(call_single_data_t *csd)
18135feb604SPaul E. McKenney {
18235feb604SPaul E. McKenney 	if (!csd) {
18335feb604SPaul E. McKenney 		smp_mb(); /* NULL cur_csd after unlock. */
18435feb604SPaul E. McKenney 		__this_cpu_write(cur_csd, NULL);
18535feb604SPaul E. McKenney 		return;
18635feb604SPaul E. McKenney 	}
18735feb604SPaul E. McKenney 	__this_cpu_write(cur_csd_func, csd->func);
18835feb604SPaul E. McKenney 	__this_cpu_write(cur_csd_info, csd->info);
18935feb604SPaul E. McKenney 	smp_wmb(); /* func and info before csd. */
19035feb604SPaul E. McKenney 	__this_cpu_write(cur_csd, csd);
19135feb604SPaul E. McKenney 	smp_mb(); /* Update cur_csd before function call. */
19235feb604SPaul E. McKenney 		  /* Or before unlock, as the case may be. */
19335feb604SPaul E. McKenney }
19435feb604SPaul E. McKenney 
csd_lock_record(call_single_data_t * csd)195d090ec0dSLeonardo Bras static __always_inline void csd_lock_record(call_single_data_t *csd)
1968d0968ccSJuergen Gross {
1978d0968ccSJuergen Gross 	if (static_branch_unlikely(&csdlock_debug_enabled))
1988d0968ccSJuergen Gross 		__csd_lock_record(csd);
1998d0968ccSJuergen Gross }
2008d0968ccSJuergen Gross 
csd_lock_wait_getcpu(call_single_data_t * csd)201d090ec0dSLeonardo Bras static int csd_lock_wait_getcpu(call_single_data_t *csd)
20235feb604SPaul E. McKenney {
20335feb604SPaul E. McKenney 	unsigned int csd_type;
20435feb604SPaul E. McKenney 
20535feb604SPaul E. McKenney 	csd_type = CSD_TYPE(csd);
20635feb604SPaul E. McKenney 	if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
207a787bdafSIngo Molnar 		return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */
20835feb604SPaul E. McKenney 	return -1;
20935feb604SPaul E. McKenney }
21035feb604SPaul E. McKenney 
211ac9d4554SPaul E. McKenney static atomic_t n_csd_lock_stuck;
212ac9d4554SPaul E. McKenney 
213ac9d4554SPaul E. McKenney /**
214ac9d4554SPaul E. McKenney  * csd_lock_is_stuck - Has a CSD-lock acquisition been stuck too long?
215ac9d4554SPaul E. McKenney  *
216ac9d4554SPaul E. McKenney  * Returns @true if a CSD-lock acquisition is stuck and has been stuck
217ac9d4554SPaul E. McKenney  * long enough for a "non-responsive CSD lock" message to be printed.
218ac9d4554SPaul E. McKenney  */
csd_lock_is_stuck(void)219ac9d4554SPaul E. McKenney bool csd_lock_is_stuck(void)
220ac9d4554SPaul E. McKenney {
221ac9d4554SPaul E. McKenney 	return !!atomic_read(&n_csd_lock_stuck);
222ac9d4554SPaul E. McKenney }
223ac9d4554SPaul E. McKenney 
22435feb604SPaul E. McKenney /*
22535feb604SPaul E. McKenney  * Complain if too much time spent waiting.  Note that only
22635feb604SPaul E. McKenney  * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
22735feb604SPaul E. McKenney  * so waiting on other types gets much less information.
22835feb604SPaul E. McKenney  */
csd_lock_wait_toolong(call_single_data_t * csd,u64 ts0,u64 * ts1,int * bug_id,unsigned long * nmessages)229d40760d6SPaul E. McKenney static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id, unsigned long *nmessages)
23035feb604SPaul E. McKenney {
23135feb604SPaul E. McKenney 	int cpu = -1;
23235feb604SPaul E. McKenney 	int cpux;
23335feb604SPaul E. McKenney 	bool firsttime;
23435feb604SPaul E. McKenney 	u64 ts2, ts_delta;
23535feb604SPaul E. McKenney 	call_single_data_t *cpu_cur_csd;
236545b8c8dSPeter Zijlstra 	unsigned int flags = READ_ONCE(csd->node.u_flags);
2373791a223SPaul E. McKenney 	unsigned long long csd_lock_timeout_ns = csd_lock_timeout * NSEC_PER_MSEC;
23835feb604SPaul E. McKenney 
23935feb604SPaul E. McKenney 	if (!(flags & CSD_FLAG_LOCK)) {
24035feb604SPaul E. McKenney 		if (!unlikely(*bug_id))
24135feb604SPaul E. McKenney 			return true;
24235feb604SPaul E. McKenney 		cpu = csd_lock_wait_getcpu(csd);
24335feb604SPaul E. McKenney 		pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
24435feb604SPaul E. McKenney 			 *bug_id, raw_smp_processor_id(), cpu);
245ac9d4554SPaul E. McKenney 		atomic_dec(&n_csd_lock_stuck);
24635feb604SPaul E. McKenney 		return true;
24735feb604SPaul E. McKenney 	}
24835feb604SPaul E. McKenney 
2499861f7f6SPaul E. McKenney 	ts2 = ktime_get_mono_fast_ns();
25094b3f0b5SRik van Riel 	/* How long since we last checked for a stuck CSD lock.*/
25135feb604SPaul E. McKenney 	ts_delta = ts2 - *ts1;
252d40760d6SPaul E. McKenney 	if (likely(ts_delta <= csd_lock_timeout_ns * (*nmessages + 1) *
253d40760d6SPaul E. McKenney 			       (!*nmessages ? 1 : (ilog2(num_online_cpus()) / 2 + 1)) ||
254d40760d6SPaul E. McKenney 		   csd_lock_timeout_ns == 0))
25535feb604SPaul E. McKenney 		return false;
25635feb604SPaul E. McKenney 
2579fbaa441SRik van Riel 	if (ts0 > ts2) {
2589fbaa441SRik van Riel 		/* Our own sched_clock went backward; don't blame another CPU. */
2599fbaa441SRik van Riel 		ts_delta = ts0 - ts2;
2609fbaa441SRik van Riel 		pr_alert("sched_clock on CPU %d went backward by %llu ns\n", raw_smp_processor_id(), ts_delta);
2619fbaa441SRik van Riel 		*ts1 = ts2;
2629fbaa441SRik van Riel 		return false;
2639fbaa441SRik van Riel 	}
2649fbaa441SRik van Riel 
26535feb604SPaul E. McKenney 	firsttime = !*bug_id;
26635feb604SPaul E. McKenney 	if (firsttime)
26735feb604SPaul E. McKenney 		*bug_id = atomic_inc_return(&csd_bug_count);
26835feb604SPaul E. McKenney 	cpu = csd_lock_wait_getcpu(csd);
26935feb604SPaul E. McKenney 	if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
27035feb604SPaul E. McKenney 		cpux = 0;
27135feb604SPaul E. McKenney 	else
27235feb604SPaul E. McKenney 		cpux = cpu;
27335feb604SPaul E. McKenney 	cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
27494b3f0b5SRik van Riel 	/* How long since this CSD lock was stuck. */
27594b3f0b5SRik van Riel 	ts_delta = ts2 - ts0;
276c1972c8dSPaul E. McKenney 	pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %lld ns for CPU#%02d %pS(%ps).\n",
277c1972c8dSPaul E. McKenney 		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), (s64)ts_delta,
27835feb604SPaul E. McKenney 		 cpu, csd->func, csd->info);
279d40760d6SPaul E. McKenney 	(*nmessages)++;
280ac9d4554SPaul E. McKenney 	if (firsttime)
281ac9d4554SPaul E. McKenney 		atomic_inc(&n_csd_lock_stuck);
28294b3f0b5SRik van Riel 	/*
28394b3f0b5SRik van Riel 	 * If the CSD lock is still stuck after 5 minutes, it is unlikely
28494b3f0b5SRik van Riel 	 * to become unstuck. Use a signed comparison to avoid triggering
28594b3f0b5SRik van Riel 	 * on underflows when the TSC is out of sync between sockets.
28694b3f0b5SRik van Riel 	 */
28794b3f0b5SRik van Riel 	BUG_ON(panic_on_ipistall > 0 && (s64)ts_delta > ((s64)panic_on_ipistall * NSEC_PER_MSEC));
28835feb604SPaul E. McKenney 	if (cpu_cur_csd && csd != cpu_cur_csd) {
28935feb604SPaul E. McKenney 		pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
29035feb604SPaul E. McKenney 			 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
29135feb604SPaul E. McKenney 			 READ_ONCE(per_cpu(cur_csd_info, cpux)));
29235feb604SPaul E. McKenney 	} else {
29335feb604SPaul E. McKenney 		pr_alert("\tcsd: CSD lock (#%d) %s.\n",
29435feb604SPaul E. McKenney 			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
29535feb604SPaul E. McKenney 	}
29635feb604SPaul E. McKenney 	if (cpu >= 0) {
2970d3a00b3SImran Khan 		if (atomic_cmpxchg_acquire(&per_cpu(trigger_backtrace, cpu), 1, 0))
29835feb604SPaul E. McKenney 			dump_cpu_task(cpu);
29935feb604SPaul E. McKenney 		if (!cpu_cur_csd) {
30035feb604SPaul E. McKenney 			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
30135feb604SPaul E. McKenney 			arch_send_call_function_single_ipi(cpu);
30235feb604SPaul E. McKenney 		}
30335feb604SPaul E. McKenney 	}
3045bd00f6dSImran Khan 	if (firsttime)
30535feb604SPaul E. McKenney 		dump_stack();
30635feb604SPaul E. McKenney 	*ts1 = ts2;
30735feb604SPaul E. McKenney 
30835feb604SPaul E. McKenney 	return false;
30935feb604SPaul E. McKenney }
31035feb604SPaul E. McKenney 
3118969a5edSPeter Zijlstra /*
3128969a5edSPeter Zijlstra  * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
3138969a5edSPeter Zijlstra  *
3140b13fda1SIngo Molnar  * For non-synchronous ipi calls the csd can still be in use by the
3150b13fda1SIngo Molnar  * previous function call. For multi-cpu calls its even more interesting
3160b13fda1SIngo Molnar  * as we'll have to ensure no other cpu is observing our csd.
3178969a5edSPeter Zijlstra  */
__csd_lock_wait(call_single_data_t * csd)318d090ec0dSLeonardo Bras static void __csd_lock_wait(call_single_data_t *csd)
3198969a5edSPeter Zijlstra {
320d40760d6SPaul E. McKenney 	unsigned long nmessages = 0;
32135feb604SPaul E. McKenney 	int bug_id = 0;
32235feb604SPaul E. McKenney 	u64 ts0, ts1;
32335feb604SPaul E. McKenney 
3249861f7f6SPaul E. McKenney 	ts1 = ts0 = ktime_get_mono_fast_ns();
32535feb604SPaul E. McKenney 	for (;;) {
326d40760d6SPaul E. McKenney 		if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id, &nmessages))
32735feb604SPaul E. McKenney 			break;
32835feb604SPaul E. McKenney 		cpu_relax();
32935feb604SPaul E. McKenney 	}
33035feb604SPaul E. McKenney 	smp_acquire__after_ctrl_dep();
33135feb604SPaul E. McKenney }
33235feb604SPaul E. McKenney 
csd_lock_wait(call_single_data_t * csd)333d090ec0dSLeonardo Bras static __always_inline void csd_lock_wait(call_single_data_t *csd)
3348d0968ccSJuergen Gross {
3358d0968ccSJuergen Gross 	if (static_branch_unlikely(&csdlock_debug_enabled)) {
3368d0968ccSJuergen Gross 		__csd_lock_wait(csd);
3378d0968ccSJuergen Gross 		return;
3388d0968ccSJuergen Gross 	}
3398d0968ccSJuergen Gross 
3408d0968ccSJuergen Gross 	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
3418d0968ccSJuergen Gross }
34235feb604SPaul E. McKenney #else
csd_lock_record(call_single_data_t * csd)343d090ec0dSLeonardo Bras static void csd_lock_record(call_single_data_t *csd)
34435feb604SPaul E. McKenney {
34535feb604SPaul E. McKenney }
34635feb604SPaul E. McKenney 
csd_lock_wait(call_single_data_t * csd)347d090ec0dSLeonardo Bras static __always_inline void csd_lock_wait(call_single_data_t *csd)
34835feb604SPaul E. McKenney {
349545b8c8dSPeter Zijlstra 	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
3506e275637SPeter Zijlstra }
35135feb604SPaul E. McKenney #endif
3526e275637SPeter Zijlstra 
csd_lock(call_single_data_t * csd)353d090ec0dSLeonardo Bras static __always_inline void csd_lock(call_single_data_t *csd)
3546e275637SPeter Zijlstra {
355e1d12f32SAndrew Morton 	csd_lock_wait(csd);
356545b8c8dSPeter Zijlstra 	csd->node.u_flags |= CSD_FLAG_LOCK;
3578969a5edSPeter Zijlstra 
3588969a5edSPeter Zijlstra 	/*
3590b13fda1SIngo Molnar 	 * prevent CPU from reordering the above assignment
3600b13fda1SIngo Molnar 	 * to ->flags with any subsequent assignments to other
361966a9671SYing Huang 	 * fields of the specified call_single_data_t structure:
3628969a5edSPeter Zijlstra 	 */
3638053871dSLinus Torvalds 	smp_wmb();
3648969a5edSPeter Zijlstra }
3658969a5edSPeter Zijlstra 
csd_unlock(call_single_data_t * csd)366d090ec0dSLeonardo Bras static __always_inline void csd_unlock(call_single_data_t *csd)
3678969a5edSPeter Zijlstra {
368545b8c8dSPeter Zijlstra 	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));
3690b13fda1SIngo Molnar 
3708969a5edSPeter Zijlstra 	/*
3710b13fda1SIngo Molnar 	 * ensure we're all done before releasing data:
3728969a5edSPeter Zijlstra 	 */
373545b8c8dSPeter Zijlstra 	smp_store_release(&csd->node.u_flags, 0);
3743d442233SJens Axboe }
3753d442233SJens Axboe 
37668f4ff04SValentin Schneider static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
37768f4ff04SValentin Schneider 
__smp_call_single_queue(int cpu,struct llist_node * node)37868f4ff04SValentin Schneider void __smp_call_single_queue(int cpu, struct llist_node *node)
37968f4ff04SValentin Schneider {
38068f4ff04SValentin Schneider 	/*
38168f4ff04SValentin Schneider 	 * We have to check the type of the CSD before queueing it, because
38268f4ff04SValentin Schneider 	 * once queued it can have its flags cleared by
38368f4ff04SValentin Schneider 	 *   flush_smp_call_function_queue()
38468f4ff04SValentin Schneider 	 * even if we haven't sent the smp_call IPI yet (e.g. the stopper
38568f4ff04SValentin Schneider 	 * executes migration_cpu_stop() on the remote CPU).
38668f4ff04SValentin Schneider 	 */
387bf5a8c26SLeonardo Bras 	if (trace_csd_queue_cpu_enabled()) {
38868f4ff04SValentin Schneider 		call_single_data_t *csd;
38968f4ff04SValentin Schneider 		smp_call_func_t func;
39068f4ff04SValentin Schneider 
39168f4ff04SValentin Schneider 		csd = container_of(node, call_single_data_t, node.llist);
39268f4ff04SValentin Schneider 		func = CSD_TYPE(csd) == CSD_TYPE_TTWU ?
39368f4ff04SValentin Schneider 			sched_ttwu_pending : csd->func;
39468f4ff04SValentin Schneider 
395bf5a8c26SLeonardo Bras 		trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);
39668f4ff04SValentin Schneider 	}
3975c312497SPeter Zijlstra 
3985c312497SPeter Zijlstra 	/*
3995c312497SPeter Zijlstra 	 * The list addition should be visible to the target CPU when it pops
4005c312497SPeter Zijlstra 	 * the head of the list to pull the entry off it in the IPI handler
4015c312497SPeter Zijlstra 	 * because of normal cache coherency rules implied by the underlying
4025c312497SPeter Zijlstra 	 * llist ops.
4035c312497SPeter Zijlstra 	 *
4045c312497SPeter Zijlstra 	 * If IPIs can go out of order to the cache coherency protocol
4055c312497SPeter Zijlstra 	 * in an architecture, sufficient synchronisation should be added
4065c312497SPeter Zijlstra 	 * to arch code to make it appear to obey cache coherency WRT
4075c312497SPeter Zijlstra 	 * locking and barrier primitives. Generic code isn't really
4085c312497SPeter Zijlstra 	 * equipped to do the right thing...
4095c312497SPeter Zijlstra 	 */
4105c312497SPeter Zijlstra 	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
4115c312497SPeter Zijlstra 		send_call_function_single_ipi(cpu);
4124b44a21dSPeter Zijlstra }
4134b44a21dSPeter Zijlstra 
4143d442233SJens Axboe /*
415966a9671SYing Huang  * Insert a previously allocated call_single_data_t element
4160b13fda1SIngo Molnar  * for execution on the given CPU. data must already have
4170b13fda1SIngo Molnar  * ->func, ->info, and ->flags set.
4183d442233SJens Axboe  */
generic_exec_single(int cpu,call_single_data_t * csd)419d090ec0dSLeonardo Bras static int generic_exec_single(int cpu, call_single_data_t *csd)
4203d442233SJens Axboe {
4218053871dSLinus Torvalds 	if (cpu == smp_processor_id()) {
4224b44a21dSPeter Zijlstra 		smp_call_func_t func = csd->func;
4234b44a21dSPeter Zijlstra 		void *info = csd->info;
4248b28499aSFrederic Weisbecker 		unsigned long flags;
4258b28499aSFrederic Weisbecker 
4268053871dSLinus Torvalds 		/*
4278053871dSLinus Torvalds 		 * We can unlock early even for the synchronous on-stack case,
4288053871dSLinus Torvalds 		 * since we're doing this from the same CPU..
4298053871dSLinus Torvalds 		 */
43035feb604SPaul E. McKenney 		csd_lock_record(csd);
4318053871dSLinus Torvalds 		csd_unlock(csd);
4328b28499aSFrederic Weisbecker 		local_irq_save(flags);
433949fa3f1SLeonardo Bras 		csd_do_func(func, info, NULL);
43435feb604SPaul E. McKenney 		csd_lock_record(NULL);
4358b28499aSFrederic Weisbecker 		local_irq_restore(flags);
4368b28499aSFrederic Weisbecker 		return 0;
4378b28499aSFrederic Weisbecker 	}
4388b28499aSFrederic Weisbecker 
4395224b961SLinus Torvalds 	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
4405224b961SLinus Torvalds 		csd_unlock(csd);
4418b28499aSFrederic Weisbecker 		return -ENXIO;
4425224b961SLinus Torvalds 	}
4438b28499aSFrederic Weisbecker 
444545b8c8dSPeter Zijlstra 	__smp_call_single_queue(cpu, &csd->node.llist);
4453d442233SJens Axboe 
4468b28499aSFrederic Weisbecker 	return 0;
4473d442233SJens Axboe }
4483d442233SJens Axboe 
4498d056c48SSrivatsa S. Bhat /**
4508d056c48SSrivatsa S. Bhat  * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
4518d056c48SSrivatsa S. Bhat  *
4528d056c48SSrivatsa S. Bhat  * Invoked by arch to handle an IPI for call function single.
4538d056c48SSrivatsa S. Bhat  * Must be called with interrupts disabled.
4543d442233SJens Axboe  */
generic_smp_call_function_single_interrupt(void)4553d442233SJens Axboe void generic_smp_call_function_single_interrupt(void)
4563d442233SJens Axboe {
45716bf5a5eSThomas Gleixner 	__flush_smp_call_function_queue(true);
4588d056c48SSrivatsa S. Bhat }
4598d056c48SSrivatsa S. Bhat 
4608d056c48SSrivatsa S. Bhat /**
46116bf5a5eSThomas Gleixner  * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
4628d056c48SSrivatsa S. Bhat  *
4638d056c48SSrivatsa S. Bhat  * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
4648d056c48SSrivatsa S. Bhat  *		      offline CPU. Skip this check if set to 'false'.
4658d056c48SSrivatsa S. Bhat  *
4668d056c48SSrivatsa S. Bhat  * Flush any pending smp-call-function callbacks queued on this CPU. This is
4678d056c48SSrivatsa S. Bhat  * invoked by the generic IPI handler, as well as by a CPU about to go offline,
4688d056c48SSrivatsa S. Bhat  * to ensure that all pending IPI callbacks are run before it goes completely
4698d056c48SSrivatsa S. Bhat  * offline.
4708d056c48SSrivatsa S. Bhat  *
4718d056c48SSrivatsa S. Bhat  * Loop through the call_single_queue and run all the queued callbacks.
4728d056c48SSrivatsa S. Bhat  * Must be called with interrupts disabled.
4738d056c48SSrivatsa S. Bhat  */
__flush_smp_call_function_queue(bool warn_cpu_offline)47416bf5a5eSThomas Gleixner static void __flush_smp_call_function_queue(bool warn_cpu_offline)
4758d056c48SSrivatsa S. Bhat {
476966a9671SYing Huang 	call_single_data_t *csd, *csd_next;
47752103be0SPeter Zijlstra 	struct llist_node *entry, *prev;
47852103be0SPeter Zijlstra 	struct llist_head *head;
479a219ccf4SSrivatsa S. Bhat 	static bool warned;
4800d3a00b3SImran Khan 	atomic_t *tbt;
481a219ccf4SSrivatsa S. Bhat 
48283efcbd0SFrederic Weisbecker 	lockdep_assert_irqs_disabled();
4838d056c48SSrivatsa S. Bhat 
4840d3a00b3SImran Khan 	/* Allow waiters to send backtrace NMI from here onwards */
4850d3a00b3SImran Khan 	tbt = this_cpu_ptr(&trigger_backtrace);
4860d3a00b3SImran Khan 	atomic_set_release(tbt, 1);
4870d3a00b3SImran Khan 
488bb964a92SChristoph Lameter 	head = this_cpu_ptr(&call_single_queue);
4898d056c48SSrivatsa S. Bhat 	entry = llist_del_all(head);
490a219ccf4SSrivatsa S. Bhat 	entry = llist_reverse_order(entry);
4913d442233SJens Axboe 
4928d056c48SSrivatsa S. Bhat 	/* There shouldn't be any pending callbacks on an offline CPU. */
4938d056c48SSrivatsa S. Bhat 	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
4949e949a38SNadav Amit 		     !warned && entry != NULL)) {
495a219ccf4SSrivatsa S. Bhat 		warned = true;
496a219ccf4SSrivatsa S. Bhat 		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
497269c861bSSuresh Siddha 
498a219ccf4SSrivatsa S. Bhat 		/*
499a219ccf4SSrivatsa S. Bhat 		 * We don't have to use the _safe() variant here
500a219ccf4SSrivatsa S. Bhat 		 * because we are not invoking the IPI handlers yet.
501a219ccf4SSrivatsa S. Bhat 		 */
502545b8c8dSPeter Zijlstra 		llist_for_each_entry(csd, entry, node.llist) {
5034b44a21dSPeter Zijlstra 			switch (CSD_TYPE(csd)) {
5044b44a21dSPeter Zijlstra 			case CSD_TYPE_ASYNC:
5054b44a21dSPeter Zijlstra 			case CSD_TYPE_SYNC:
5064b44a21dSPeter Zijlstra 			case CSD_TYPE_IRQ_WORK:
507a219ccf4SSrivatsa S. Bhat 				pr_warn("IPI callback %pS sent to offline CPU\n",
508a219ccf4SSrivatsa S. Bhat 					csd->func);
5094b44a21dSPeter Zijlstra 				break;
5104b44a21dSPeter Zijlstra 
511a1488664SPeter Zijlstra 			case CSD_TYPE_TTWU:
512a1488664SPeter Zijlstra 				pr_warn("IPI task-wakeup sent to offline CPU\n");
513a1488664SPeter Zijlstra 				break;
514a1488664SPeter Zijlstra 
5154b44a21dSPeter Zijlstra 			default:
5164b44a21dSPeter Zijlstra 				pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
5174b44a21dSPeter Zijlstra 					CSD_TYPE(csd));
5184b44a21dSPeter Zijlstra 				break;
519a219ccf4SSrivatsa S. Bhat 			}
5208053871dSLinus Torvalds 		}
5213d442233SJens Axboe 	}
52247885016SFrederic Weisbecker 
52347885016SFrederic Weisbecker 	/*
52452103be0SPeter Zijlstra 	 * First; run all SYNC callbacks, people are waiting for us.
52547885016SFrederic Weisbecker 	 */
52652103be0SPeter Zijlstra 	prev = NULL;
527545b8c8dSPeter Zijlstra 	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
5284b44a21dSPeter Zijlstra 		/* Do we wait until *after* callback? */
5294b44a21dSPeter Zijlstra 		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
5303d442233SJens Axboe 			smp_call_func_t func = csd->func;
5313d442233SJens Axboe 			void *info = csd->info;
5323d442233SJens Axboe 
53352103be0SPeter Zijlstra 			if (prev) {
534545b8c8dSPeter Zijlstra 				prev->next = &csd_next->node.llist;
53552103be0SPeter Zijlstra 			} else {
536545b8c8dSPeter Zijlstra 				entry = &csd_next->node.llist;
53752103be0SPeter Zijlstra 			}
5384b44a21dSPeter Zijlstra 
53935feb604SPaul E. McKenney 			csd_lock_record(csd);
540949fa3f1SLeonardo Bras 			csd_do_func(func, info, csd);
5413d442233SJens Axboe 			csd_unlock(csd);
54235feb604SPaul E. McKenney 			csd_lock_record(NULL);
5433d442233SJens Axboe 		} else {
544545b8c8dSPeter Zijlstra 			prev = &csd->node.llist;
54552103be0SPeter Zijlstra 		}
54652103be0SPeter Zijlstra 	}
54752103be0SPeter Zijlstra 
5481771257cSPaul E. McKenney 	if (!entry)
549a1488664SPeter Zijlstra 		return;
550a1488664SPeter Zijlstra 
55152103be0SPeter Zijlstra 	/*
55252103be0SPeter Zijlstra 	 * Second; run all !SYNC callbacks.
55352103be0SPeter Zijlstra 	 */
554a1488664SPeter Zijlstra 	prev = NULL;
555545b8c8dSPeter Zijlstra 	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
5564b44a21dSPeter Zijlstra 		int type = CSD_TYPE(csd);
5574b44a21dSPeter Zijlstra 
558a1488664SPeter Zijlstra 		if (type != CSD_TYPE_TTWU) {
559a1488664SPeter Zijlstra 			if (prev) {
560545b8c8dSPeter Zijlstra 				prev->next = &csd_next->node.llist;
561a1488664SPeter Zijlstra 			} else {
562545b8c8dSPeter Zijlstra 				entry = &csd_next->node.llist;
563a1488664SPeter Zijlstra 			}
564a1488664SPeter Zijlstra 
5654b44a21dSPeter Zijlstra 			if (type == CSD_TYPE_ASYNC) {
56652103be0SPeter Zijlstra 				smp_call_func_t func = csd->func;
56752103be0SPeter Zijlstra 				void *info = csd->info;
56852103be0SPeter Zijlstra 
56935feb604SPaul E. McKenney 				csd_lock_record(csd);
5703d442233SJens Axboe 				csd_unlock(csd);
571949fa3f1SLeonardo Bras 				csd_do_func(func, info, csd);
57235feb604SPaul E. McKenney 				csd_lock_record(NULL);
5734b44a21dSPeter Zijlstra 			} else if (type == CSD_TYPE_IRQ_WORK) {
5744b44a21dSPeter Zijlstra 				irq_work_single(csd);
5754b44a21dSPeter Zijlstra 			}
576a1488664SPeter Zijlstra 
577a1488664SPeter Zijlstra 		} else {
578545b8c8dSPeter Zijlstra 			prev = &csd->node.llist;
5793d442233SJens Axboe 		}
5803d442233SJens Axboe 	}
5813d442233SJens Axboe 
582a1488664SPeter Zijlstra 	/*
583a1488664SPeter Zijlstra 	 * Third; only CSD_TYPE_TTWU is left, issue those.
584a1488664SPeter Zijlstra 	 */
585949fa3f1SLeonardo Bras 	if (entry) {
586949fa3f1SLeonardo Bras 		csd = llist_entry(entry, typeof(*csd), node.llist);
587949fa3f1SLeonardo Bras 		csd_do_func(sched_ttwu_pending, entry, csd);
588949fa3f1SLeonardo Bras 	}
589a1488664SPeter Zijlstra }
590a1488664SPeter Zijlstra 
59116bf5a5eSThomas Gleixner 
59216bf5a5eSThomas Gleixner /**
59316bf5a5eSThomas Gleixner  * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
59416bf5a5eSThomas Gleixner  *				   from task context (idle, migration thread)
59516bf5a5eSThomas Gleixner  *
59616bf5a5eSThomas Gleixner  * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it
59716bf5a5eSThomas Gleixner  * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by
59816bf5a5eSThomas Gleixner  * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to
59916bf5a5eSThomas Gleixner  * handle queued SMP function calls before scheduling.
60016bf5a5eSThomas Gleixner  *
60116bf5a5eSThomas Gleixner  * The migration thread has to ensure that an eventually pending wakeup has
60216bf5a5eSThomas Gleixner  * been handled before it migrates a task.
60316bf5a5eSThomas Gleixner  */
flush_smp_call_function_queue(void)60416bf5a5eSThomas Gleixner void flush_smp_call_function_queue(void)
605b2a02fc4SPeter Zijlstra {
6061a90bfd2SSebastian Andrzej Siewior 	unsigned int was_pending;
607b2a02fc4SPeter Zijlstra 	unsigned long flags;
608b2a02fc4SPeter Zijlstra 
609b2a02fc4SPeter Zijlstra 	if (llist_empty(this_cpu_ptr(&call_single_queue)))
610b2a02fc4SPeter Zijlstra 		return;
611b2a02fc4SPeter Zijlstra 
612b2a02fc4SPeter Zijlstra 	local_irq_save(flags);
6131a90bfd2SSebastian Andrzej Siewior 	/* Get the already pending soft interrupts for RT enabled kernels */
6141a90bfd2SSebastian Andrzej Siewior 	was_pending = local_softirq_pending();
61516bf5a5eSThomas Gleixner 	__flush_smp_call_function_queue(true);
616f9d34595SSebastian Andrzej Siewior 	if (local_softirq_pending())
6171a90bfd2SSebastian Andrzej Siewior 		do_softirq_post_smp_call_flush(was_pending);
618f9d34595SSebastian Andrzej Siewior 
619b2a02fc4SPeter Zijlstra 	local_irq_restore(flags);
6203d442233SJens Axboe }
6213d442233SJens Axboe 
6223d442233SJens Axboe /*
6233d442233SJens Axboe  * smp_call_function_single - Run a function on a specific CPU
6243d442233SJens Axboe  * @func: The function to run. This must be fast and non-blocking.
6253d442233SJens Axboe  * @info: An arbitrary pointer to pass to the function.
6263d442233SJens Axboe  * @wait: If true, wait until function has completed on other CPUs.
6273d442233SJens Axboe  *
62872f279b2SSheng Yang  * Returns 0 on success, else a negative status code.
6293d442233SJens Axboe  */
smp_call_function_single(int cpu,smp_call_func_t func,void * info,int wait)6303a5f65dfSDavid Howells int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
6318691e5a8SJens Axboe 			     int wait)
6323d442233SJens Axboe {
633966a9671SYing Huang 	call_single_data_t *csd;
634966a9671SYing Huang 	call_single_data_t csd_stack = {
635545b8c8dSPeter Zijlstra 		.node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
636966a9671SYing Huang 	};
6370b13fda1SIngo Molnar 	int this_cpu;
6388b28499aSFrederic Weisbecker 	int err;
6393d442233SJens Axboe 
6400b13fda1SIngo Molnar 	/*
6410b13fda1SIngo Molnar 	 * prevent preemption and reschedule on another processor,
6420b13fda1SIngo Molnar 	 * as well as CPU removal
6430b13fda1SIngo Molnar 	 */
6440b13fda1SIngo Molnar 	this_cpu = get_cpu();
6450b13fda1SIngo Molnar 
646269c861bSSuresh Siddha 	/*
647269c861bSSuresh Siddha 	 * Can deadlock when called with interrupts disabled.
648269c861bSSuresh Siddha 	 * We allow cpu's that are not yet online though, as no one else can
649269c861bSSuresh Siddha 	 * send smp call function interrupt to this cpu and as such deadlocks
650269c861bSSuresh Siddha 	 * can't happen.
651269c861bSSuresh Siddha 	 */
652269c861bSSuresh Siddha 	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
653269c861bSSuresh Siddha 		     && !oops_in_progress);
6543d442233SJens Axboe 
65519dbdcb8SPeter Zijlstra 	/*
65619dbdcb8SPeter Zijlstra 	 * When @wait we can deadlock when we interrupt between llist_add() and
65719dbdcb8SPeter Zijlstra 	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
65819dbdcb8SPeter Zijlstra 	 * csd_lock() on because the interrupt context uses the same csd
65919dbdcb8SPeter Zijlstra 	 * storage.
66019dbdcb8SPeter Zijlstra 	 */
66119dbdcb8SPeter Zijlstra 	WARN_ON_ONCE(!in_task());
66219dbdcb8SPeter Zijlstra 
6638053871dSLinus Torvalds 	csd = &csd_stack;
6648053871dSLinus Torvalds 	if (!wait) {
6658053871dSLinus Torvalds 		csd = this_cpu_ptr(&csd_data);
6668053871dSLinus Torvalds 		csd_lock(csd);
6678053871dSLinus Torvalds 	}
6688053871dSLinus Torvalds 
6694b44a21dSPeter Zijlstra 	csd->func = func;
6704b44a21dSPeter Zijlstra 	csd->info = info;
67135feb604SPaul E. McKenney #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
672545b8c8dSPeter Zijlstra 	csd->node.src = smp_processor_id();
673545b8c8dSPeter Zijlstra 	csd->node.dst = cpu;
674e48c15b7SPaul E. McKenney #endif
6754b44a21dSPeter Zijlstra 
6764b44a21dSPeter Zijlstra 	err = generic_exec_single(cpu, csd);
6778053871dSLinus Torvalds 
6788053871dSLinus Torvalds 	if (wait)
6798053871dSLinus Torvalds 		csd_lock_wait(csd);
6803d442233SJens Axboe 
6813d442233SJens Axboe 	put_cpu();
6820b13fda1SIngo Molnar 
683f73be6deSH. Peter Anvin 	return err;
6843d442233SJens Axboe }
6853d442233SJens Axboe EXPORT_SYMBOL(smp_call_function_single);
6863d442233SJens Axboe 
687d7877c03SFrederic Weisbecker /**
68849b3bd21SRandy Dunlap  * smp_call_function_single_async() - Run an asynchronous function on a
689c46fff2aSFrederic Weisbecker  * 			         specific CPU.
690d7877c03SFrederic Weisbecker  * @cpu: The CPU to run on.
691d7877c03SFrederic Weisbecker  * @csd: Pre-allocated and setup data structure
692d7877c03SFrederic Weisbecker  *
693c46fff2aSFrederic Weisbecker  * Like smp_call_function_single(), but the call is asynchonous and
694c46fff2aSFrederic Weisbecker  * can thus be done from contexts with disabled interrupts.
695c46fff2aSFrederic Weisbecker  *
696c46fff2aSFrederic Weisbecker  * The caller passes his own pre-allocated data structure
697c46fff2aSFrederic Weisbecker  * (ie: embedded in an object) and is responsible for synchronizing it
698c46fff2aSFrederic Weisbecker  * such that the IPIs performed on the @csd are strictly serialized.
699c46fff2aSFrederic Weisbecker  *
7005a18cecaSPeter Xu  * If the function is called with one csd which has not yet been
7015a18cecaSPeter Xu  * processed by previous call to smp_call_function_single_async(), the
7025a18cecaSPeter Xu  * function will return immediately with -EBUSY showing that the csd
7035a18cecaSPeter Xu  * object is still in progress.
7045a18cecaSPeter Xu  *
705c46fff2aSFrederic Weisbecker  * NOTE: Be careful, there is unfortunately no current debugging facility to
706c46fff2aSFrederic Weisbecker  * validate the correctness of this serialization.
70749b3bd21SRandy Dunlap  *
70849b3bd21SRandy Dunlap  * Return: %0 on success or negative errno value on error
709d7877c03SFrederic Weisbecker  */
smp_call_function_single_async(int cpu,call_single_data_t * csd)710d090ec0dSLeonardo Bras int smp_call_function_single_async(int cpu, call_single_data_t *csd)
711d7877c03SFrederic Weisbecker {
712d7877c03SFrederic Weisbecker 	int err = 0;
713d7877c03SFrederic Weisbecker 
714fce8ad15SFrederic Weisbecker 	preempt_disable();
7158053871dSLinus Torvalds 
716545b8c8dSPeter Zijlstra 	if (csd->node.u_flags & CSD_FLAG_LOCK) {
7175a18cecaSPeter Xu 		err = -EBUSY;
7185a18cecaSPeter Xu 		goto out;
7195a18cecaSPeter Xu 	}
7208053871dSLinus Torvalds 
721545b8c8dSPeter Zijlstra 	csd->node.u_flags = CSD_FLAG_LOCK;
7228053871dSLinus Torvalds 	smp_wmb();
7238053871dSLinus Torvalds 
7244b44a21dSPeter Zijlstra 	err = generic_exec_single(cpu, csd);
7255a18cecaSPeter Xu 
7265a18cecaSPeter Xu out:
727fce8ad15SFrederic Weisbecker 	preempt_enable();
728d7877c03SFrederic Weisbecker 
729d7877c03SFrederic Weisbecker 	return err;
730d7877c03SFrederic Weisbecker }
731c46fff2aSFrederic Weisbecker EXPORT_SYMBOL_GPL(smp_call_function_single_async);
732d7877c03SFrederic Weisbecker 
7332ea6dec4SRusty Russell /*
7342ea6dec4SRusty Russell  * smp_call_function_any - Run a function on any of the given cpus
7352ea6dec4SRusty Russell  * @mask: The mask of cpus it can run on.
7362ea6dec4SRusty Russell  * @func: The function to run. This must be fast and non-blocking.
7372ea6dec4SRusty Russell  * @info: An arbitrary pointer to pass to the function.
7382ea6dec4SRusty Russell  * @wait: If true, wait until function has completed.
7392ea6dec4SRusty Russell  *
7402ea6dec4SRusty Russell  * Returns 0 on success, else a negative status code (if no cpus were online).
7412ea6dec4SRusty Russell  *
7422ea6dec4SRusty Russell  * Selection preference:
7432ea6dec4SRusty Russell  *	1) current cpu if in @mask
7442ea6dec4SRusty Russell  *	2) any cpu of current node if in @mask
7452ea6dec4SRusty Russell  *	3) any other online cpu in @mask
7462ea6dec4SRusty Russell  */
smp_call_function_any(const struct cpumask * mask,smp_call_func_t func,void * info,int wait)7472ea6dec4SRusty Russell int smp_call_function_any(const struct cpumask *mask,
7483a5f65dfSDavid Howells 			  smp_call_func_t func, void *info, int wait)
7492ea6dec4SRusty Russell {
7502ea6dec4SRusty Russell 	unsigned int cpu;
7512ea6dec4SRusty Russell 	const struct cpumask *nodemask;
7522ea6dec4SRusty Russell 	int ret;
7532ea6dec4SRusty Russell 
7542ea6dec4SRusty Russell 	/* Try for same CPU (cheapest) */
7552ea6dec4SRusty Russell 	cpu = get_cpu();
7562ea6dec4SRusty Russell 	if (cpumask_test_cpu(cpu, mask))
7572ea6dec4SRusty Russell 		goto call;
7582ea6dec4SRusty Russell 
7592ea6dec4SRusty Russell 	/* Try for same node. */
760af2422c4SDavid John 	nodemask = cpumask_of_node(cpu_to_node(cpu));
7612ea6dec4SRusty Russell 	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
7622ea6dec4SRusty Russell 	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
7632ea6dec4SRusty Russell 		if (cpu_online(cpu))
7642ea6dec4SRusty Russell 			goto call;
7652ea6dec4SRusty Russell 	}
7662ea6dec4SRusty Russell 
7672ea6dec4SRusty Russell 	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
7682ea6dec4SRusty Russell 	cpu = cpumask_any_and(mask, cpu_online_mask);
7692ea6dec4SRusty Russell call:
7702ea6dec4SRusty Russell 	ret = smp_call_function_single(cpu, func, info, wait);
7712ea6dec4SRusty Russell 	put_cpu();
7722ea6dec4SRusty Russell 	return ret;
7732ea6dec4SRusty Russell }
7742ea6dec4SRusty Russell EXPORT_SYMBOL_GPL(smp_call_function_any);
7752ea6dec4SRusty Russell 
776a32a4d8aSNadav Amit /*
777a32a4d8aSNadav Amit  * Flags to be used as scf_flags argument of smp_call_function_many_cond().
778a32a4d8aSNadav Amit  *
779a32a4d8aSNadav Amit  * %SCF_WAIT:		Wait until function execution is completed
780a32a4d8aSNadav Amit  * %SCF_RUN_LOCAL:	Run also locally if local cpu is set in cpumask
781a32a4d8aSNadav Amit  */
782a32a4d8aSNadav Amit #define SCF_WAIT	(1U << 0)
783a32a4d8aSNadav Amit #define SCF_RUN_LOCAL	(1U << 1)
784a32a4d8aSNadav Amit 
smp_call_function_many_cond(const struct cpumask * mask,smp_call_func_t func,void * info,unsigned int scf_flags,smp_cond_func_t cond_func)78567719ef2SSebastian Andrzej Siewior static void smp_call_function_many_cond(const struct cpumask *mask,
78667719ef2SSebastian Andrzej Siewior 					smp_call_func_t func, void *info,
787a32a4d8aSNadav Amit 					unsigned int scf_flags,
788a32a4d8aSNadav Amit 					smp_cond_func_t cond_func)
7893d442233SJens Axboe {
790a32a4d8aSNadav Amit 	int cpu, last_cpu, this_cpu = smp_processor_id();
791e1d12f32SAndrew Morton 	struct call_function_data *cfd;
792a32a4d8aSNadav Amit 	bool wait = scf_flags & SCF_WAIT;
793bf5a8c26SLeonardo Bras 	int nr_cpus = 0;
794a32a4d8aSNadav Amit 	bool run_remote = false;
795a32a4d8aSNadav Amit 	bool run_local = false;
796a32a4d8aSNadav Amit 
797a32a4d8aSNadav Amit 	lockdep_assert_preemption_disabled();
7983d442233SJens Axboe 
799269c861bSSuresh Siddha 	/*
800269c861bSSuresh Siddha 	 * Can deadlock when called with interrupts disabled.
801269c861bSSuresh Siddha 	 * We allow cpu's that are not yet online though, as no one else can
802269c861bSSuresh Siddha 	 * send smp call function interrupt to this cpu and as such deadlocks
803269c861bSSuresh Siddha 	 * can't happen.
804269c861bSSuresh Siddha 	 */
805a32a4d8aSNadav Amit 	if (cpu_online(this_cpu) && !oops_in_progress &&
806a32a4d8aSNadav Amit 	    !early_boot_irqs_disabled)
807a32a4d8aSNadav Amit 		lockdep_assert_irqs_enabled();
8083d442233SJens Axboe 
80919dbdcb8SPeter Zijlstra 	/*
81019dbdcb8SPeter Zijlstra 	 * When @wait we can deadlock when we interrupt between llist_add() and
81119dbdcb8SPeter Zijlstra 	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
81219dbdcb8SPeter Zijlstra 	 * csd_lock() on because the interrupt context uses the same csd
81319dbdcb8SPeter Zijlstra 	 * storage.
81419dbdcb8SPeter Zijlstra 	 */
81519dbdcb8SPeter Zijlstra 	WARN_ON_ONCE(!in_task());
81619dbdcb8SPeter Zijlstra 
817a32a4d8aSNadav Amit 	/* Check if we need local execution. */
818a32a4d8aSNadav Amit 	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask) &&
819a32a4d8aSNadav Amit 	    (!cond_func || cond_func(this_cpu, info)))
820a32a4d8aSNadav Amit 		run_local = true;
821a32a4d8aSNadav Amit 
82254b11e6dSRusty Russell 	/* Check if we need remote execution, i.e., any CPU excluding this one. */
8230b13fda1SIngo Molnar 	cpu = cpumask_first_and(mask, cpu_online_mask);
82454b11e6dSRusty Russell 	if (cpu == this_cpu)
825a32a4d8aSNadav Amit 		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
826a32a4d8aSNadav Amit 	if (cpu < nr_cpu_ids)
8270b13fda1SIngo Molnar 		run_remote = true;
828a32a4d8aSNadav Amit 
829bb964a92SChristoph Lameter 	if (run_remote) {
830e1d12f32SAndrew Morton 		cfd = this_cpu_ptr(&cfd_data);
8316c8557bdSPeter Zijlstra 		cpumask_and(cfd->cpumask, mask, cpu_online_mask);
832723aae25SMilton Miller 		__cpumask_clear_cpu(this_cpu, cfd->cpumask);
8333fc5b3b6SAaron Lu 
834e1d12f32SAndrew Morton 		cpumask_clear(cfd->cpumask_ipi);
8356366d062SPaul E. McKenney 		for_each_cpu(cpu, cfd->cpumask) {
8369a46ad6dSShaohua Li 			call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
8375c312497SPeter Zijlstra 
8385c312497SPeter Zijlstra 			if (cond_func && !cond_func(cpu, info)) {
83967719ef2SSebastian Andrzej Siewior 				__cpumask_clear_cpu(cpu, cfd->cpumask);
8405c312497SPeter Zijlstra 				continue;
84167719ef2SSebastian Andrzej Siewior 			}
8429a46ad6dSShaohua Li 
8438053871dSLinus Torvalds 			csd_lock(csd);
844545b8c8dSPeter Zijlstra 			if (wait)
8459a46ad6dSShaohua Li 				csd->node.u_flags |= CSD_TYPE_SYNC;
8469a46ad6dSShaohua Li 			csd->func = func;
84735feb604SPaul E. McKenney 			csd->info = info;
848545b8c8dSPeter Zijlstra #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
849545b8c8dSPeter Zijlstra 			csd->node.src = smp_processor_id();
850e48c15b7SPaul E. McKenney 			csd->node.dst = cpu;
851bf5a8c26SLeonardo Bras #endif
852bf5a8c26SLeonardo Bras 			trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);
853a32a4d8aSNadav Amit 
8546c8557bdSPeter Zijlstra 			if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
855a32a4d8aSNadav Amit 				__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
856a32a4d8aSNadav Amit 				nr_cpus++;
857a32a4d8aSNadav Amit 				last_cpu = cpu;
8589a46ad6dSShaohua Li 			}
859561920a0SSuresh Siddha 		}
860a32a4d8aSNadav Amit 
861a32a4d8aSNadav Amit 		/*
862a32a4d8aSNadav Amit 		 * Choose the most efficient way to send an IPI. Note that the
863a32a4d8aSNadav Amit 		 * number of CPUs might be zero due to concurrent changes to the
864a32a4d8aSNadav Amit 		 * provided mask.
865a32a4d8aSNadav Amit 		 */
8665c312497SPeter Zijlstra 		if (nr_cpus == 1)
867a32a4d8aSNadav Amit 			send_call_function_single_ipi(last_cpu);
8685c312497SPeter Zijlstra 		else if (likely(nr_cpus > 1))
869a32a4d8aSNadav Amit 			send_call_function_ipi_mask(cfd->cpumask_ipi);
8703d442233SJens Axboe 	}
871a32a4d8aSNadav Amit 
872a32a4d8aSNadav Amit 	if (run_local) {
873a32a4d8aSNadav Amit 		unsigned long flags;
874a32a4d8aSNadav Amit 
875949fa3f1SLeonardo Bras 		local_irq_save(flags);
876a32a4d8aSNadav Amit 		csd_do_func(func, info, NULL);
877a32a4d8aSNadav Amit 		local_irq_restore(flags);
878a32a4d8aSNadav Amit 	}
879a32a4d8aSNadav Amit 
880e1d12f32SAndrew Morton 	if (run_remote && wait) {
881966a9671SYing Huang 		for_each_cpu(cpu, cfd->cpumask) {
882e1d12f32SAndrew Morton 			call_single_data_t *csd;
8836366d062SPaul E. McKenney 
8849a46ad6dSShaohua Li 			csd = per_cpu_ptr(cfd->csd, cpu);
8859a46ad6dSShaohua Li 			csd_lock_wait(csd);
8869a46ad6dSShaohua Li 		}
887cc7a486cSNick Piggin 	}
88867719ef2SSebastian Andrzej Siewior }
88967719ef2SSebastian Andrzej Siewior 
890a32a4d8aSNadav Amit /**
89167719ef2SSebastian Andrzej Siewior  * smp_call_function_many(): Run a function on a set of CPUs.
89267719ef2SSebastian Andrzej Siewior  * @mask: The set of cpus to run on (only runs on online subset).
89367719ef2SSebastian Andrzej Siewior  * @func: The function to run. This must be fast and non-blocking.
89449b3bd21SRandy Dunlap  * @info: An arbitrary pointer to pass to the function.
895a32a4d8aSNadav Amit  * @wait: Bitmask that controls the operation. If %SCF_WAIT is set, wait
896a32a4d8aSNadav Amit  *        (atomically) until function has completed on other CPUs. If
897a32a4d8aSNadav Amit  *        %SCF_RUN_LOCAL is set, the function will also be run locally
89867719ef2SSebastian Andrzej Siewior  *        if the local CPU is set in the @cpumask.
89967719ef2SSebastian Andrzej Siewior  *
90067719ef2SSebastian Andrzej Siewior  * If @wait is true, then returns once @func has returned.
90167719ef2SSebastian Andrzej Siewior  *
90267719ef2SSebastian Andrzej Siewior  * You must not call this function with disabled interrupts or from a
90367719ef2SSebastian Andrzej Siewior  * hardware interrupt handler or from a bottom half handler. Preemption
90467719ef2SSebastian Andrzej Siewior  * must be disabled when calling this function.
90567719ef2SSebastian Andrzej Siewior  */
smp_call_function_many(const struct cpumask * mask,smp_call_func_t func,void * info,bool wait)90667719ef2SSebastian Andrzej Siewior void smp_call_function_many(const struct cpumask *mask,
90767719ef2SSebastian Andrzej Siewior 			    smp_call_func_t func, void *info, bool wait)
908a32a4d8aSNadav Amit {
90967719ef2SSebastian Andrzej Siewior 	smp_call_function_many_cond(mask, func, info, wait * SCF_WAIT, NULL);
91054b11e6dSRusty Russell }
9113d442233SJens Axboe EXPORT_SYMBOL(smp_call_function_many);
9123d442233SJens Axboe 
9133d442233SJens Axboe /**
9143d442233SJens Axboe  * smp_call_function(): Run a function on all other CPUs.
9153d442233SJens Axboe  * @func: The function to run. This must be fast and non-blocking.
9160b13fda1SIngo Molnar  * @info: An arbitrary pointer to pass to the function.
9170b13fda1SIngo Molnar  * @wait: If true, wait (atomically) until function has completed
9183d442233SJens Axboe  *        on other CPUs.
91954b11e6dSRusty Russell  *
9203d442233SJens Axboe  * Returns 0.
9213d442233SJens Axboe  *
92272f279b2SSheng Yang  * If @wait is true, then returns once @func has returned; otherwise
9233d442233SJens Axboe  * it returns just before the target cpu calls @func.
9243d442233SJens Axboe  *
9253d442233SJens Axboe  * You must not call this function with disabled interrupts or from a
9263d442233SJens Axboe  * hardware interrupt handler or from a bottom half handler.
927caa75932SNadav Amit  */
smp_call_function(smp_call_func_t func,void * info,int wait)9283d442233SJens Axboe void smp_call_function(smp_call_func_t func, void *info, int wait)
9293d442233SJens Axboe {
93054b11e6dSRusty Russell 	preempt_disable();
9313d442233SJens Axboe 	smp_call_function_many(cpu_online_mask, func, info, wait);
9323d442233SJens Axboe 	preempt_enable();
9333d442233SJens Axboe }
934351f8f8eSAmerigo Wang EXPORT_SYMBOL(smp_call_function);
93534db18a0SAmerigo Wang 
93634db18a0SAmerigo Wang /* Setup configured maximum number of CPUs to activate */
93734db18a0SAmerigo Wang unsigned int setup_max_cpus = NR_CPUS;
93834db18a0SAmerigo Wang EXPORT_SYMBOL(setup_max_cpus);
93934db18a0SAmerigo Wang 
94034db18a0SAmerigo Wang 
94134db18a0SAmerigo Wang /*
94234db18a0SAmerigo Wang  * Setup routine for controlling SMP activation
94334db18a0SAmerigo Wang  *
94434db18a0SAmerigo Wang  * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
94534db18a0SAmerigo Wang  * activation entirely (the MPS table probe still happens, though).
94634db18a0SAmerigo Wang  *
94734db18a0SAmerigo Wang  * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
94834db18a0SAmerigo Wang  * greater than 0, limits the maximum number of CPUs activated in
94934db18a0SAmerigo Wang  * SMP mode to <NUM>.
95034db18a0SAmerigo Wang  */
951ba831b7bSThomas Gleixner 
arch_disable_smp_support(void)95234db18a0SAmerigo Wang void __weak __init arch_disable_smp_support(void) { }
95334db18a0SAmerigo Wang 
nosmp(char * str)95434db18a0SAmerigo Wang static int __init nosmp(char *str)
95534db18a0SAmerigo Wang {
95634db18a0SAmerigo Wang 	setup_max_cpus = 0;
95734db18a0SAmerigo Wang 	arch_disable_smp_support();
95834db18a0SAmerigo Wang 
95934db18a0SAmerigo Wang 	return 0;
96034db18a0SAmerigo Wang }
96134db18a0SAmerigo Wang 
96234db18a0SAmerigo Wang early_param("nosmp", nosmp);
96334db18a0SAmerigo Wang 
96434db18a0SAmerigo Wang /* this is hard limit */
nrcpus(char * str)96534db18a0SAmerigo Wang static int __init nrcpus(char *str)
96634db18a0SAmerigo Wang {
96734db18a0SAmerigo Wang 	int nr_cpus;
96858934356SMuchun Song 
96938bef8e5SYury Norov 	if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
97034db18a0SAmerigo Wang 		set_nr_cpu_ids(nr_cpus);
97134db18a0SAmerigo Wang 
97234db18a0SAmerigo Wang 	return 0;
97334db18a0SAmerigo Wang }
97434db18a0SAmerigo Wang 
97534db18a0SAmerigo Wang early_param("nr_cpus", nrcpus);
97634db18a0SAmerigo Wang 
maxcpus(char * str)97734db18a0SAmerigo Wang static int __init maxcpus(char *str)
97834db18a0SAmerigo Wang {
97934db18a0SAmerigo Wang 	get_option(&str, &setup_max_cpus);
98034db18a0SAmerigo Wang 	if (setup_max_cpus == 0)
98134db18a0SAmerigo Wang 		arch_disable_smp_support();
98234db18a0SAmerigo Wang 
98334db18a0SAmerigo Wang 	return 0;
98434db18a0SAmerigo Wang }
98534db18a0SAmerigo Wang 
98634db18a0SAmerigo Wang early_param("maxcpus", maxcpus);
9876f9c07beSYury Norov 
98834db18a0SAmerigo Wang #if (NR_CPUS > 1) && !defined(CONFIG_FORCE_NR_CPUS)
9899b130ad5SAlexey Dobriyan /* Setup number of possible processor ids */
99034db18a0SAmerigo Wang unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
99153fc190cSYury Norov EXPORT_SYMBOL(nr_cpu_ids);
99234db18a0SAmerigo Wang #endif
99334db18a0SAmerigo Wang 
99434db18a0SAmerigo Wang /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
setup_nr_cpu_ids(void)99534db18a0SAmerigo Wang void __init setup_nr_cpu_ids(void)
99638bef8e5SYury Norov {
99734db18a0SAmerigo Wang 	set_nr_cpu_ids(find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1);
99834db18a0SAmerigo Wang }
99934db18a0SAmerigo Wang 
100034db18a0SAmerigo Wang /* Called by boot processor to activate the rest. */
smp_init(void)100134db18a0SAmerigo Wang void __init smp_init(void)
100292b23278SMichael Ellerman {
100334db18a0SAmerigo Wang 	int num_nodes, num_cpus;
10043bb5d2eeSSuresh Siddha 
10054cb28cedSThomas Gleixner 	idle_threads_init();
10063bb5d2eeSSuresh Siddha 	cpuhp_threads_init();
100751111dceSMichael Ellerman 
100851111dceSMichael Ellerman 	pr_info("Bringing up secondary CPUs ...\n");
1009b99a2659SQais Yousef 
101034db18a0SAmerigo Wang 	bringup_nonboot_cpus(setup_max_cpus);
101192b23278SMichael Ellerman 
101292b23278SMichael Ellerman 	num_nodes = num_online_nodes();
101392b23278SMichael Ellerman 	num_cpus  = num_online_cpus();
1014c4df1593SThorsten Blum 	pr_info("Brought up %d node%s, %d CPU%s\n",
101592b23278SMichael Ellerman 		num_nodes, str_plural(num_nodes), num_cpus, str_plural(num_cpus));
101634db18a0SAmerigo Wang 
101734db18a0SAmerigo Wang 	/* Any cleanup work */
101834db18a0SAmerigo Wang 	smp_cpus_done(setup_max_cpus);
101934db18a0SAmerigo Wang }
1020351f8f8eSAmerigo Wang 
1021b3a7e98eSGilad Ben-Yossef /*
1022b3a7e98eSGilad Ben-Yossef  * on_each_cpu_cond(): Call a function on each processor for which
1023b3a7e98eSGilad Ben-Yossef  * the supplied function cond_func returns true, optionally waiting
1024b3a7e98eSGilad Ben-Yossef  * for all the required CPUs to finish. This may include the local
1025b3a7e98eSGilad Ben-Yossef  * processor.
10267b7b8a2cSRandy Dunlap  * @cond_func:	A callback function that is passed a cpu id and
1027b3a7e98eSGilad Ben-Yossef  *		the info parameter. The function is called
1028b3a7e98eSGilad Ben-Yossef  *		with preemption disabled. The function should
1029b3a7e98eSGilad Ben-Yossef  *		return a blooean value indicating whether to IPI
1030b3a7e98eSGilad Ben-Yossef  *		the specified CPU.
1031b3a7e98eSGilad Ben-Yossef  * @func:	The function to run on all applicable CPUs.
1032b3a7e98eSGilad Ben-Yossef  *		This must be fast and non-blocking.
1033b3a7e98eSGilad Ben-Yossef  * @info:	An arbitrary pointer to pass to both functions.
1034b3a7e98eSGilad Ben-Yossef  * @wait:	If true, wait (atomically) until function has
1035b3a7e98eSGilad Ben-Yossef  *		completed on other CPUs.
1036b3a7e98eSGilad Ben-Yossef  *
1037b3a7e98eSGilad Ben-Yossef  * Preemption is disabled to protect against CPUs going offline but not online.
1038b3a7e98eSGilad Ben-Yossef  * CPUs going online during the call will not be seen or sent an IPI.
1039b3a7e98eSGilad Ben-Yossef  *
1040b3a7e98eSGilad Ben-Yossef  * You must not call this function with disabled interrupts or
1041b3a7e98eSGilad Ben-Yossef  * from a hardware interrupt handler or from a bottom half handler.
10425671d814SSebastian Andrzej Siewior  */
on_each_cpu_cond_mask(smp_cond_func_t cond_func,smp_call_func_t func,void * info,bool wait,const struct cpumask * mask)1043cb923159SSebastian Andrzej Siewior void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
1044b3a7e98eSGilad Ben-Yossef 			   void *info, bool wait, const struct cpumask *mask)
1045a32a4d8aSNadav Amit {
1046b3a7e98eSGilad Ben-Yossef 	unsigned int scf_flags = SCF_RUN_LOCAL;
1047a32a4d8aSNadav Amit 
1048a32a4d8aSNadav Amit 	if (wait)
1049b3a7e98eSGilad Ben-Yossef 		scf_flags |= SCF_WAIT;
1050a32a4d8aSNadav Amit 
1051a32a4d8aSNadav Amit 	preempt_disable();
1052a32a4d8aSNadav Amit 	smp_call_function_many_cond(mask, func, info, scf_flags, cond_func);
1053b3a7e98eSGilad Ben-Yossef 	preempt_enable();
10547d49b28aSRik van Riel }
10557d49b28aSRik van Riel EXPORT_SYMBOL(on_each_cpu_cond_mask);
1056f37f435fSThomas Gleixner 
do_nothing(void * unused)1057f37f435fSThomas Gleixner static void do_nothing(void *unused)
1058f37f435fSThomas Gleixner {
1059f37f435fSThomas Gleixner }
1060f37f435fSThomas Gleixner 
1061f37f435fSThomas Gleixner /**
1062f37f435fSThomas Gleixner  * kick_all_cpus_sync - Force all cpus out of idle
1063f37f435fSThomas Gleixner  *
1064f37f435fSThomas Gleixner  * Used to synchronize the update of pm_idle function pointer. It's
1065f37f435fSThomas Gleixner  * called after the pointer is updated and returns after the dummy
1066f37f435fSThomas Gleixner  * callback function has been executed on all cpus. The execution of
1067f37f435fSThomas Gleixner  * the function can only happen on the remote cpus after they have
1068f37f435fSThomas Gleixner  * left the idle function which had been called via pm_idle function
1069f37f435fSThomas Gleixner  * pointer. So it's guaranteed that nothing uses the previous pointer
1070f37f435fSThomas Gleixner  * anymore.
1071f37f435fSThomas Gleixner  */
kick_all_cpus_sync(void)1072f37f435fSThomas Gleixner void kick_all_cpus_sync(void)
1073f37f435fSThomas Gleixner {
1074f37f435fSThomas Gleixner 	/* Make sure the change is visible before we kick the cpus */
1075f37f435fSThomas Gleixner 	smp_mb();
1076f37f435fSThomas Gleixner 	smp_call_function(do_nothing, NULL, 1);
1077f37f435fSThomas Gleixner }
1078c6f4459fSChuansheng Liu EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
1079c6f4459fSChuansheng Liu 
1080c6f4459fSChuansheng Liu /**
1081c6f4459fSChuansheng Liu  * wake_up_all_idle_cpus - break all cpus out of idle
1082c6f4459fSChuansheng Liu  * wake_up_all_idle_cpus try to break all cpus which is in idle state even
1083c6f4459fSChuansheng Liu  * including idle polling cpus, for non-idle cpus, we will do nothing
1084c6f4459fSChuansheng Liu  * for them.
1085c6f4459fSChuansheng Liu  */
wake_up_all_idle_cpus(void)1086c6f4459fSChuansheng Liu void wake_up_all_idle_cpus(void)
1087c6f4459fSChuansheng Liu {
1088c6f4459fSChuansheng Liu 	int cpu;
108996611c26SPeter Zijlstra 
109096611c26SPeter Zijlstra 	for_each_possible_cpu(cpu) {
109196611c26SPeter Zijlstra 		preempt_disable();
1092c6f4459fSChuansheng Liu 		if (cpu != smp_processor_id() && cpu_online(cpu))
109396611c26SPeter Zijlstra 			wake_up_if_idle(cpu);
1094c6f4459fSChuansheng Liu 		preempt_enable();
1095c6f4459fSChuansheng Liu 	}
1096c6f4459fSChuansheng Liu }
1097df8ce9d7SJuergen Gross EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
1098df8ce9d7SJuergen Gross 
109949b3bd21SRandy Dunlap /**
110049b3bd21SRandy Dunlap  * struct smp_call_on_cpu_struct - Call a function on a specific CPU
110149b3bd21SRandy Dunlap  * @work: &work_struct
110249b3bd21SRandy Dunlap  * @done: &completion to signal
110349b3bd21SRandy Dunlap  * @func: function to call
110449b3bd21SRandy Dunlap  * @data: function's data argument
110549b3bd21SRandy Dunlap  * @ret: return value from @func
1106df8ce9d7SJuergen Gross  * @cpu: target CPU (%-1 for any CPU)
1107df8ce9d7SJuergen Gross  *
1108df8ce9d7SJuergen Gross  * Used to call a function on a specific cpu and wait for it to return.
1109df8ce9d7SJuergen Gross  * Optionally make sure the call is done on a specified physical cpu via vcpu
1110df8ce9d7SJuergen Gross  * pinning in order to support virtualized environments.
1111df8ce9d7SJuergen Gross  */
1112df8ce9d7SJuergen Gross struct smp_call_on_cpu_struct {
1113df8ce9d7SJuergen Gross 	struct work_struct	work;
1114df8ce9d7SJuergen Gross 	struct completion	done;
1115df8ce9d7SJuergen Gross 	int			(*func)(void *);
1116df8ce9d7SJuergen Gross 	void			*data;
1117df8ce9d7SJuergen Gross 	int			ret;
1118df8ce9d7SJuergen Gross 	int			cpu;
1119df8ce9d7SJuergen Gross };
1120df8ce9d7SJuergen Gross 
smp_call_on_cpu_callback(struct work_struct * work)1121df8ce9d7SJuergen Gross static void smp_call_on_cpu_callback(struct work_struct *work)
1122df8ce9d7SJuergen Gross {
1123df8ce9d7SJuergen Gross 	struct smp_call_on_cpu_struct *sscs;
1124df8ce9d7SJuergen Gross 
1125df8ce9d7SJuergen Gross 	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
1126df8ce9d7SJuergen Gross 	if (sscs->cpu >= 0)
1127df8ce9d7SJuergen Gross 		hypervisor_pin_vcpu(sscs->cpu);
1128df8ce9d7SJuergen Gross 	sscs->ret = sscs->func(sscs->data);
1129df8ce9d7SJuergen Gross 	if (sscs->cpu >= 0)
1130df8ce9d7SJuergen Gross 		hypervisor_pin_vcpu(-1);
1131df8ce9d7SJuergen Gross 
1132df8ce9d7SJuergen Gross 	complete(&sscs->done);
1133df8ce9d7SJuergen Gross }
1134df8ce9d7SJuergen Gross 
smp_call_on_cpu(unsigned int cpu,int (* func)(void *),void * par,bool phys)1135df8ce9d7SJuergen Gross int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
1136df8ce9d7SJuergen Gross {
1137df8ce9d7SJuergen Gross 	struct smp_call_on_cpu_struct sscs = {
1138df8ce9d7SJuergen Gross 		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
1139df8ce9d7SJuergen Gross 		.func = func,
1140df8ce9d7SJuergen Gross 		.data = par,
1141df8ce9d7SJuergen Gross 		.cpu  = phys ? cpu : -1,
1142df8ce9d7SJuergen Gross 	};
11438db54949SPeter Zijlstra 
11448db54949SPeter Zijlstra 	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);
1145df8ce9d7SJuergen Gross 
1146df8ce9d7SJuergen Gross 	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
1147df8ce9d7SJuergen Gross 		return -ENXIO;
1148df8ce9d7SJuergen Gross 
1149df8ce9d7SJuergen Gross 	queue_work_on(cpu, system_wq, &sscs.work);
115077aeb1b6SZqiang 	wait_for_completion(&sscs.done);
1151df8ce9d7SJuergen Gross 	destroy_work_on_stack(&sscs.work);
1152df8ce9d7SJuergen Gross 
1153df8ce9d7SJuergen Gross 	return sscs.ret;
1154df8ce9d7SJuergen Gross }
1155 EXPORT_SYMBOL_GPL(smp_call_on_cpu);
1156