/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
} cpu_hotplug;

void __init cpu_hotplug_init(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_init(&cpu_hotplug.lock);
	cpu_hotplug.refcount = 0;
}

#ifdef CONFIG_HOTPLUG_CPU

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);

#endif	/* CONFIG_HOTPLUG_CPU */

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask and cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}
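/*
 * Illustrative sketch (not part of this file): how the reader and
 * writer sides above pair up.  A reader pins the online map around a
 * walk with get_online_cpus()/put_online_cpus(); the hotplug writer
 * takes cpu_add_remove_lock first, then waits for readers to drain in
 * cpu_hotplug_begin().  The function name below is hypothetical.
 *
 *	static unsigned int count_online_cpus_example(void)
 *	{
 *		unsigned int cpu, n = 0;
 *
 *		get_online_cpus();	(no CPU can go away in here)
 *		for_each_online_cpu(cpu)
 *			n++;
 *		put_online_cpus();
 *		return n;
 *	}
 */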
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu &&
		    (!cputime_eq(p->utime, cputime_zero) ||
		     !cputime_eq(p->stime, cputime_zero)))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
				param->hcpu);

	/* Force idle task to run as soon as we yield: it should
	   immediately notice cpu is offline and die quickly. */
	sched_idle_next();
	return 0;
}
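/*
 * Illustrative sketch (not part of this file): the shape of a callback
 * as registered with register_cpu_notifier() above.  All names are
 * hypothetical; masking out CPU_TASKS_FROZEN lets one switch statement
 * handle both the normal and the suspend/resume variants of an event.
 *
 *	static int example_cpu_callback(struct notifier_block *nb,
 *					unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			... allocate per-cpu state for cpu ...
 *			break;
 *		case CPU_DEAD:
 *			... free per-cpu state for cpu ...
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_cpu_nb = {
 *		.notifier_call = example_cpu_callback,
 *	};
 */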
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	cpumask_var_t old_allowed;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
		return -ENOMEM;

	cpu_hotplug_begin();
	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
					hcpu, -1, &nr_calls);
	if (err == NOTIFY_BAD) {
		nr_calls--;
		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					  hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		err = -EINVAL;
		goto out_release;
	}

	/* Ensure that we are not runnable on the dying cpu */
	cpumask_copy(old_allowed, &current->cpus_allowed);
	set_cpus_allowed_ptr(current,
			cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();

		goto out_allowed;
	}
	BUG_ON(cpu_online(cpu));

	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu))
		yield();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
				    hcpu) == NOTIFY_BAD)
		BUG();

	check_for_tasks(cpu);

out_allowed:
	set_cpus_allowed_ptr(current, old_allowed);
out_release:
	cpu_hotplug_done();
	if (!err) {
		if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();
	}
	free_cpumask_var(old_allowed);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err = 0;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	cpu_clear(cpu, cpu_active_map);

	/*
	 * Make sure all CPUs did the reschedule and are not
	 * using a stale version of cpu_active_mask.
	 * This is not strictly necessary because the stop_machine()
	 * that we run down the line already provides the required
	 * synchronization.  But it's really a side effect and we do not
	 * want to depend on the innards of stop_machine here.
	 */
	synchronize_sched();

	err = _cpu_down(cpu, 0);

	if (cpu_online(cpu))
		cpu_set(cpu, cpu_active_map);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /* CONFIG_HOTPLUG_CPU */
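/*
 * Illustrative note (not part of this file): cpu_down()/cpu_up() are
 * normally driven from userspace through the sysfs "online" attribute,
 * e.g. "echo 0 > /sys/devices/system/cpu/cpu1/online".  A hypothetical
 * kernel-side caller would just check the error code:
 *
 *	int err = cpu_down(1);
 *	if (err)
 *		printk(KERN_ERR "offline of CPU1 failed: %d\n", err);
 */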
/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
					-1, &nr_calls);
	if (ret == NOTIFY_BAD) {
		nr_calls--;
		printk("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		ret = -EINVAL;
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	cpu_set(cpu, cpu_active_map);

	/* Now tell interested subsystems that the CPU is fully online. */
	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__raw_notifier_call_chain(&cpu_chain,
				CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
	cpu_hotplug_done();

	return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;
	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
		printk(KERN_ERR "please check additional_cpus= boot "
			"parameter\n");
#endif
		return -EINVAL;
	}

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/* We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);
	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error) {
			cpumask_set_cpu(cpu, frozen_cpus);
			printk("CPU%d is down\n", cpu);
		} else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}
	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk("Enabling non-boot CPUs ...\n");
	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk("CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}
	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
#endif /* CONFIG_PM_SLEEP_SMP */
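/*
 * Illustrative sketch (not part of this file): the suspend core pairs
 * the two helpers above roughly as
 *
 *	error = disable_nonboot_cpus();
 *	if (!error) {
 *		... enter the sleep state ...
 *		enable_nonboot_cpus();
 *	}
 *
 * so only the boot CPU runs while the system image is being created or
 * restored; frozen_cpus remembers which CPUs to bring back.
 */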
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all of the NR_CPUS single-bit masks 1<<nr.
 *
 * It is used by cpumask_of() to get the constant address of a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = 1UL << (x)
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
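/*
 * Illustrative sketch (not part of this file): cpu_bit_bitmap[] lets
 * cpumask_of() hand out the address of a constant single-bit mask with
 * no allocation at all:
 *
 *	const struct cpumask *mask = cpumask_of(2);
 *	BUG_ON(!cpumask_test_cpu(2, mask));
 *
 * Architecture setup code fills the maps above through the init_cpu_*()
 * and set_cpu_*() helpers, e.g. set_cpu_possible(cpu, true) while
 * enumerating CPUs at boot.
 */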