// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/reboot.c
 *
 *  Copyright (C) 2013  Linus Torvalds
 */

#define pr_fmt(fmt)	"reboot: " fmt

#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/export.h>
#include <linux/kexec.h>
#include <linux/kmod.h>
#include <linux/kmsg_dump.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/syscalls.h>
#include <linux/syscore_ops.h>
#include <linux/uaccess.h>

/*
 * This indicates whether you can reboot with ctrl-alt-del: the default is yes.
 */

int C_A_D = 1;
struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);

#if defined(CONFIG_ARM)
#define DEFAULT_REBOOT_MODE		= REBOOT_HARD
#else
#define DEFAULT_REBOOT_MODE
#endif
enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
EXPORT_SYMBOL_GPL(reboot_mode);
enum reboot_mode panic_reboot_mode = REBOOT_UNDEFINED;

/*
 * This variable is used privately to keep track of whether or not
 * reboot_type is still set to its default value (i.e., reboot= hasn't
 * been set on the command line). This is needed so that we can
 * suppress DMI scanning for reboot quirks. Without it, it's
 * impossible to override a faulty reboot quirk without recompiling.
 */
int reboot_default = 1;
int reboot_cpu;
enum reboot_type reboot_type = BOOT_ACPI;
int reboot_force;

struct sys_off_handler {
	struct notifier_block nb;
	int (*sys_off_cb)(struct sys_off_data *data);
	void *cb_data;
	enum sys_off_mode mode;
	bool blocking;
	void *list;
};

/*
 * Temporary stub that prevents linkage failure while we're in the process
 * of removing all uses of legacy pm_power_off() around the kernel.
 */
void __weak (*pm_power_off)(void);

/**
 * emergency_restart - reboot the system
 *
 * Without shutting down any hardware or taking any locks
 * reboot the system. This is called when we know we are in
 * trouble so this is our best effort to reboot. This is
 * safe to call in interrupt context.
 */
void emergency_restart(void)
{
	kmsg_dump(KMSG_DUMP_EMERG);
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);

void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	usermodehelper_disable();
	device_shutdown();
}

/**
 * register_reboot_notifier - Register function to be called at reboot time
 * @nb: Info about notifier function to be called
 *
 * Registers a function with the list of functions
 * to be called at reboot time.
 *
 * Currently always returns zero, as blocking_notifier_chain_register()
 * always returns zero.
 */
int register_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(register_reboot_notifier);

/**
 * unregister_reboot_notifier - Unregister previously registered reboot notifier
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered reboot
 * notifier function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */
int unregister_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(unregister_reboot_notifier);
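
/*
 * Illustrative sketch (not part of this file): a driver that needs to
 * quiesce hardware before the system reboots registers a notifier_block
 * with register_reboot_notifier() and removes it on unload. The callback
 * and device names below are hypothetical.
 *
 *	static int mydev_reboot_cb(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		// Quiesce the hardware before the reboot proceeds.
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block mydev_reboot_nb = {
 *		.notifier_call = mydev_reboot_cb,
 *	};
 *
 *	static int __init mydev_init(void)
 *	{
 *		return register_reboot_notifier(&mydev_reboot_nb);
 *	}
 *
 *	static void __exit mydev_exit(void)
 *	{
 *		unregister_reboot_notifier(&mydev_reboot_nb);
 *	}
 */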

static void devm_unregister_reboot_notifier(struct device *dev, void *res)
{
	WARN_ON(unregister_reboot_notifier(*(struct notifier_block **)res));
}

int devm_register_reboot_notifier(struct device *dev, struct notifier_block *nb)
{
	struct notifier_block **rcnb;
	int ret;

	rcnb = devres_alloc(devm_unregister_reboot_notifier,
			    sizeof(*rcnb), GFP_KERNEL);
	if (!rcnb)
		return -ENOMEM;

	ret = register_reboot_notifier(nb);
	if (!ret) {
		*rcnb = nb;
		devres_add(dev, rcnb);
	} else {
		devres_free(rcnb);
	}

	return ret;
}
EXPORT_SYMBOL(devm_register_reboot_notifier);

/*
 * Notifier list for kernel code which wants to be called
 * to restart the system.
 */
static ATOMIC_NOTIFIER_HEAD(restart_handler_list);

/**
 * register_restart_handler - Register function to be called to reset
 *			      the system
 * @nb: Info about handler function to be called
 * @nb->priority:	Handler priority. Handlers should follow the
 *			following guidelines for setting priorities.
 *			0:	Restart handler of last resort,
 *				with limited restart capabilities
 *			128:	Default restart handler; use if no other
 *				restart handler is expected to be available,
 *				and/or if restart functionality is
 *				sufficient to restart the entire system
 *			255:	Highest priority restart handler, will
 *				preempt all other restart handlers
 *
 * Registers a function with code to be called to restart the
 * system.
 *
 * Registered functions will be called from machine_restart as last
 * step of the restart sequence (if the architecture specific
 * machine_restart function calls do_kernel_restart - see below
 * for details).
 * Registered functions are expected to restart the system immediately.
 * If more than one function is registered, the restart handler priority
 * selects which function will be called first.
 *
 * Restart handlers are expected to be registered from non-architecture
 * code, typically from drivers. A typical use case would be a system
 * where restart functionality is provided through a watchdog. Multiple
 * restart handlers may exist; for example, one restart handler might
 * restart the entire system, while another only restarts the CPU.
 * In such cases, the restart handler which only restarts part of the
 * hardware is expected to register with low priority to ensure that
 * it only runs if no other means to restart the system is available.
 *
 * Currently always returns zero, as atomic_notifier_chain_register()
 * always returns zero.
 */
int register_restart_handler(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&restart_handler_list, nb);
}
EXPORT_SYMBOL(register_restart_handler);
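
/*
 * Illustrative sketch (not part of this file): a watchdog-style driver that
 * can restart the whole board registers at the default priority (128).
 * Names below are hypothetical.
 *
 *	static int mywdt_restart_handler(struct notifier_block *nb,
 *					 unsigned long mode, void *cmd)
 *	{
 *		// Kick the hardware so it resets the whole board;
 *		// this is not expected to return if the restart succeeds.
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block mywdt_restart_nb = {
 *		.notifier_call	= mywdt_restart_handler,
 *		.priority	= 128,
 *	};
 *
 *	err = register_restart_handler(&mywdt_restart_nb);
 */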

/**
 * unregister_restart_handler - Unregister previously registered
 *				restart handler
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered restart handler function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */
int unregister_restart_handler(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&restart_handler_list, nb);
}
EXPORT_SYMBOL(unregister_restart_handler);

/**
 * do_kernel_restart - Execute kernel restart handler call chain
 *
 * Calls functions registered with register_restart_handler.
 *
 * Expected to be called from machine_restart as last step of the restart
 * sequence.
 *
 * Restarts the system immediately if a restart handler function has been
 * registered. Otherwise does nothing.
 */
void do_kernel_restart(char *cmd)
{
	atomic_notifier_call_chain(&restart_handler_list, reboot_mode, cmd);
}
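
/*
 * Illustrative sketch (not taken from any particular architecture): the
 * arch-specific machine_restart() is expected to invoke do_kernel_restart()
 * as its last step so that registered restart handlers get a chance to
 * reset the hardware.
 *
 *	void machine_restart(char *cmd)
 *	{
 *		local_irq_disable();
 *		smp_send_stop();
 *		do_kernel_restart(cmd);
 *		// If no handler managed to restart the machine,
 *		// complain and spin.
 *		pr_emerg("Unable to restart system\n");
 *		while (1)
 *			cpu_relax();
 *	}
 */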

void migrate_to_reboot_cpu(void)
{
	/* The boot cpu is always logical cpu 0 */
	int cpu = reboot_cpu;

	cpu_hotplug_disable();

	/* Make certain the cpu I'm about to reboot on is online */
	if (!cpu_online(cpu))
		cpu = cpumask_first(cpu_online_mask);

	/* Prevent races with other tasks migrating this task */
	current->flags |= PF_NO_SETAFFINITY;

	/* Make certain I only run on the appropriate processor */
	set_cpus_allowed_ptr(current, cpumask_of(cpu));
}

/**
 * kernel_restart - reboot the system
 * @cmd: pointer to buffer containing command to execute for restart
 *	 or %NULL
 *
 * Shutdown everything and perform a clean reboot.
 * This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	migrate_to_reboot_cpu();
	syscore_shutdown();
	if (!cmd)
		pr_emerg("Restarting system\n");
	else
		pr_emerg("Restarting system with command '%s'\n", cmd);
	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);

static void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
	system_state = state;
	usermodehelper_disable();
	device_shutdown();
}

/**
 * kernel_halt - halt the system
 *
 * Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	migrate_to_reboot_cpu();
	syscore_shutdown();
	pr_emerg("System halted\n");
	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_halt();
}
EXPORT_SYMBOL_GPL(kernel_halt);

/*
 * Notifier list for kernel code which wants to be called
 * to prepare system for power off.
 */
static BLOCKING_NOTIFIER_HEAD(power_off_prep_handler_list);

/*
 * Notifier list for kernel code which wants to be called
 * to power off system.
 */
static ATOMIC_NOTIFIER_HEAD(power_off_handler_list);

static int sys_off_notify(struct notifier_block *nb,
			  unsigned long mode, void *cmd)
{
	struct sys_off_handler *handler;
	struct sys_off_data data = {};

	handler = container_of(nb, struct sys_off_handler, nb);
	data.cb_data = handler->cb_data;
	data.mode = mode;
	data.cmd = cmd;

	return handler->sys_off_cb(&data);
}

/**
 * register_sys_off_handler - Register sys-off handler
 * @mode: Sys-off mode
 * @priority: Handler priority
 * @callback: Callback function
 * @cb_data: Callback argument
 *
 * Registers system power-off or restart handler that will be invoked
 * at the step corresponding to the given sys-off mode. Handler's callback
 * should return NOTIFY_DONE to permit execution of the next handler in
 * the call chain or NOTIFY_STOP to break the chain (in error case for
 * example).
 *
 * Multiple handlers can be registered at the default priority level.
 *
 * Only one handler can be registered at the non-default priority level,
 * otherwise ERR_PTR(-EBUSY) is returned.
 *
 * Returns a new instance of struct sys_off_handler on success, or
 * an ERR_PTR()-encoded error code otherwise.
 */
struct sys_off_handler *
register_sys_off_handler(enum sys_off_mode mode,
			 int priority,
			 int (*callback)(struct sys_off_data *data),
			 void *cb_data)
{
	struct sys_off_handler *handler;
	int err;

	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
	if (!handler)
		return ERR_PTR(-ENOMEM);

	switch (mode) {
	case SYS_OFF_MODE_POWER_OFF_PREPARE:
		handler->list = &power_off_prep_handler_list;
		handler->blocking = true;
		break;

	case SYS_OFF_MODE_POWER_OFF:
		handler->list = &power_off_handler_list;
		break;

	case SYS_OFF_MODE_RESTART:
		handler->list = &restart_handler_list;
		break;

	default:
		kfree(handler);
		return ERR_PTR(-EINVAL);
	}

	handler->nb.notifier_call = sys_off_notify;
	handler->nb.priority = priority;
	handler->sys_off_cb = callback;
	handler->cb_data = cb_data;
	handler->mode = mode;

	if (handler->blocking) {
		if (priority == SYS_OFF_PRIO_DEFAULT)
			err = blocking_notifier_chain_register(handler->list,
							       &handler->nb);
		else
			err = blocking_notifier_chain_register_unique_prio(handler->list,
									   &handler->nb);
	} else {
		if (priority == SYS_OFF_PRIO_DEFAULT)
			err = atomic_notifier_chain_register(handler->list,
							     &handler->nb);
		else
			err = atomic_notifier_chain_register_unique_prio(handler->list,
									 &handler->nb);
	}

	if (err) {
		kfree(handler);
		return ERR_PTR(err);
	}

	return handler;
}
EXPORT_SYMBOL_GPL(register_sys_off_handler);
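
/*
 * Illustrative sketch (not part of this file): a PMIC driver registering a
 * power-off handler at the default priority and keeping the returned handle
 * for later unregistration. The names are hypothetical.
 *
 *	static int mypmic_power_off(struct sys_off_data *data)
 *	{
 *		struct mypmic *pmic = data->cb_data;
 *
 *		// Tell the PMIC to cut power; usually does not return.
 *		return NOTIFY_DONE;
 *	}
 *
 *	pmic->sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
 *						 SYS_OFF_PRIO_DEFAULT,
 *						 mypmic_power_off, pmic);
 *	if (IS_ERR(pmic->sys_off))
 *		return PTR_ERR(pmic->sys_off);
 *	...
 *	unregister_sys_off_handler(pmic->sys_off);
 */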

/**
 * unregister_sys_off_handler - Unregister sys-off handler
 * @handler: Sys-off handler
 *
 * Unregisters given sys-off handler.
 */
void unregister_sys_off_handler(struct sys_off_handler *handler)
{
	int err;

	if (!handler)
		return;

	if (handler->blocking)
		err = blocking_notifier_chain_unregister(handler->list,
							 &handler->nb);
	else
		err = atomic_notifier_chain_unregister(handler->list,
						       &handler->nb);

	/* sanity check, shall never happen */
	WARN_ON(err);

	kfree(handler);
}
EXPORT_SYMBOL_GPL(unregister_sys_off_handler);

static void devm_unregister_sys_off_handler(void *data)
{
	struct sys_off_handler *handler = data;

	unregister_sys_off_handler(handler);
}

/**
 * devm_register_sys_off_handler - Register sys-off handler
 * @dev: Device that registers handler
 * @mode: Sys-off mode
 * @priority: Handler priority
 * @callback: Callback function
 * @cb_data: Callback argument
 *
 * Registers resource-managed sys-off handler.
 *
 * Returns zero on success, or error code on failure.
 */
int devm_register_sys_off_handler(struct device *dev,
				  enum sys_off_mode mode,
				  int priority,
				  int (*callback)(struct sys_off_data *data),
				  void *cb_data)
{
	struct sys_off_handler *handler;

	handler = register_sys_off_handler(mode, priority, callback, cb_data);
	if (IS_ERR(handler))
		return PTR_ERR(handler);

	return devm_add_action_or_reset(dev, devm_unregister_sys_off_handler,
					handler);
}
EXPORT_SYMBOL_GPL(devm_register_sys_off_handler);

/**
 * devm_register_power_off_handler - Register power-off handler
 * @dev: Device that registers callback
 * @callback: Callback function
 * @cb_data: Callback's argument
 *
 * Registers resource-managed sys-off handler with a default priority
 * and using power-off mode.
 *
 * Returns zero on success, or error code on failure.
 */
int devm_register_power_off_handler(struct device *dev,
				    int (*callback)(struct sys_off_data *data),
				    void *cb_data)
{
	return devm_register_sys_off_handler(dev,
					     SYS_OFF_MODE_POWER_OFF,
					     SYS_OFF_PRIO_DEFAULT,
					     callback, cb_data);
}
EXPORT_SYMBOL_GPL(devm_register_power_off_handler);

/**
 * devm_register_restart_handler - Register restart handler
 * @dev: Device that registers callback
 * @callback: Callback function
 * @cb_data: Callback's argument
 *
 * Registers resource-managed sys-off handler with a default priority
 * and using restart mode.
 *
 * Returns zero on success, or error code on failure.
 */
int devm_register_restart_handler(struct device *dev,
				  int (*callback)(struct sys_off_data *data),
				  void *cb_data)
{
	return devm_register_sys_off_handler(dev,
					     SYS_OFF_MODE_RESTART,
					     SYS_OFF_PRIO_DEFAULT,
					     callback, cb_data);
}
EXPORT_SYMBOL_GPL(devm_register_restart_handler);
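
/*
 * Illustrative sketch (not part of this file): with the devm_ variants a
 * driver's probe routine can register a power-off handler and rely on the
 * devres machinery to unregister it automatically on unbind. Names are
 * hypothetical.
 *
 *	static int mydrv_power_off(struct sys_off_data *data)
 *	{
 *		struct mydrv *drv = data->cb_data;
 *
 *		// Ask the hardware to remove power.
 *		return NOTIFY_DONE;
 *	}
 *
 *	static int mydrv_probe(struct platform_device *pdev)
 *	{
 *		...
 *		return devm_register_power_off_handler(&pdev->dev,
 *						       mydrv_power_off, drv);
 *	}
 */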

static struct sys_off_handler *platform_power_off_handler;

static int platform_power_off_notify(struct sys_off_data *data)
{
	void (*platform_power_power_off_cb)(void) = data->cb_data;

	platform_power_power_off_cb();

	return NOTIFY_DONE;
}

/**
 * register_platform_power_off - Register platform-level power-off callback
 * @power_off: Power-off callback
 *
 * Registers power-off callback that will be called as last step
 * of the power-off sequence. This callback is expected to be invoked
 * as a last resort. Only one platform power-off callback is allowed
 * to be registered at a time.
 *
 * Returns zero on success, or error code on failure.
 */
int register_platform_power_off(void (*power_off)(void))
{
	struct sys_off_handler *handler;

	handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
					   SYS_OFF_PRIO_PLATFORM,
					   platform_power_off_notify,
					   power_off);
	if (IS_ERR(handler))
		return PTR_ERR(handler);

	platform_power_off_handler = handler;

	return 0;
}
EXPORT_SYMBOL_GPL(register_platform_power_off);

/**
 * unregister_platform_power_off - Unregister platform-level power-off callback
 * @power_off: Power-off callback
 *
 * Unregisters previously registered platform power-off callback.
 */
void unregister_platform_power_off(void (*power_off)(void))
{
	if (platform_power_off_handler &&
	    platform_power_off_handler->cb_data == power_off) {
		unregister_sys_off_handler(platform_power_off_handler);
		platform_power_off_handler = NULL;
	}
}
EXPORT_SYMBOL_GPL(unregister_platform_power_off);
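
/*
 * Illustrative sketch (not part of this file): board or machine-level code
 * that only has a bare "cut the power" hook can use the platform-level
 * helper, which registers at SYS_OFF_PRIO_PLATFORM so it only runs when no
 * better handler exists. The function name is hypothetical.
 *
 *	static void myboard_power_off(void)
 *	{
 *		// Poke the board-specific register that removes power.
 *	}
 *
 *	err = register_platform_power_off(myboard_power_off);
 *	...
 *	unregister_platform_power_off(myboard_power_off);
 */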

static int legacy_pm_power_off(struct sys_off_data *data)
{
	if (pm_power_off)
		pm_power_off();

	return NOTIFY_DONE;
}

static void do_kernel_power_off_prepare(void)
{
	blocking_notifier_call_chain(&power_off_prep_handler_list, 0, NULL);
}

/**
 * do_kernel_power_off - Execute kernel power-off handler call chain
 *
 * Expected to be called as last step of the power-off sequence.
 *
 * Powers off the system immediately if a power-off handler function has
 * been registered. Otherwise does nothing.
 */
void do_kernel_power_off(void)
{
	atomic_notifier_call_chain(&power_off_handler_list, 0, NULL);
}

/**
 * kernel_can_power_off - check whether system can be powered off
 *
 * Returns true if power-off handler is registered and system can be
 * powered off, false otherwise.
 */
bool kernel_can_power_off(void)
{
	return !atomic_notifier_call_chain_is_empty(&power_off_handler_list);
}
EXPORT_SYMBOL_GPL(kernel_can_power_off);

/**
 * kernel_power_off - power off the system
 *
 * Shutdown everything and perform a clean system power-off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	do_kernel_power_off_prepare();
	migrate_to_reboot_cpu();
	syscore_shutdown();
	pr_emerg("Power down\n");
	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);

DEFINE_MUTEX(system_transition_mutex);

/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
		void __user *, arg)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	struct sys_off_handler *sys_off = NULL;
	char buffer[256];
	int ret = 0;

	/* We only trust the superuser with rebooting the system. */
	if (!ns_capable(pid_ns->user_ns, CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/*
	 * If pid namespaces are enabled and the current task is in a child
	 * pid_namespace, the command is handled by reboot_pid_ns() which will
	 * call do_exit().
	 */
	ret = reboot_pid_ns(pid_ns, cmd);
	if (ret)
		return ret;

	/*
	 * Register sys-off handlers for the legacy PM callback. This allows
	 * legacy PM callbacks to temporarily co-exist with the new sys-off API.
	 *
	 * TODO: Remove legacy handlers once all legacy PM users are
	 * switched to the sys-off based APIs.
	 */
	if (pm_power_off) {
		sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
						   SYS_OFF_PRIO_DEFAULT,
						   legacy_pm_power_off, NULL);
		if (IS_ERR(sys_off))
			return PTR_ERR(sys_off);
	}

	/*
	 * Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set, do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !kernel_can_power_off())
		cmd = LINUX_REBOOT_CMD_HALT;

	mutex_lock(&system_transition_mutex);
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		do_exit(0);

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		ret = strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1);
		if (ret < 0) {
			ret = -EFAULT;
			break;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

#ifdef CONFIG_KEXEC_CORE
	case LINUX_REBOOT_CMD_KEXEC:
		ret = kernel_kexec();
		break;
#endif

#ifdef CONFIG_HIBERNATION
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		ret = hibernate();
		break;
#endif

	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&system_transition_mutex);
	unregister_sys_off_handler(sys_off);
	return ret;
}
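
/*
 * Illustrative sketch (userspace, not part of this file): because of the
 * magic-number check above, a caller must pass both constants explicitly,
 * e.g. to request a restart with a command string:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/reboot.h>
 *
 *	sync();		// the syscall itself does not sync
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_RESTART2, "bootloader");
 *
 * The "bootloader" argument is only an example; the caller needs
 * CAP_SYS_BOOT in its pid namespace's user namespace.
 */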

static void deferred_cad(struct work_struct *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_cad_pid(SIGINT, 1);
}

char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
static const char reboot_cmd[] = "/sbin/reboot";

static int run_cmd(const char *cmd)
{
	char **argv;
	static char *envp[] = {
		"HOME=/",
		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
		NULL
	};
	int ret;

	argv = argv_split(GFP_KERNEL, cmd, NULL);
	if (argv) {
		ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
		argv_free(argv);
	} else {
		ret = -ENOMEM;
	}

	return ret;
}

static int __orderly_reboot(void)
{
	int ret;

	ret = run_cmd(reboot_cmd);

	if (ret) {
		pr_warn("Failed to start orderly reboot: forcing the issue\n");
		emergency_sync();
		kernel_restart(NULL);
	}

	return ret;
}

static int __orderly_poweroff(bool force)
{
	int ret;

	ret = run_cmd(poweroff_cmd);

	if (ret && force) {
		pr_warn("Failed to start orderly shutdown: forcing the issue\n");

		/*
		 * I guess this should try to kick off some daemon to sync and
		 * poweroff asap. Or not even bother syncing if we're doing an
		 * emergency shutdown?
		 */
		emergency_sync();
		kernel_power_off();
	}

	return ret;
}

static bool poweroff_force;

static void poweroff_work_func(struct work_struct *work)
{
	__orderly_poweroff(poweroff_force);
}

static DECLARE_WORK(poweroff_work, poweroff_work_func);

/**
 * orderly_poweroff - Trigger an orderly system poweroff
 * @force: force poweroff if command execution fails
 *
 * This may be called from any context to trigger a system shutdown.
 * If the orderly shutdown fails, it will force an immediate shutdown.
 */
void orderly_poweroff(bool force)
{
	if (force) /* do not override the pending "true" */
		poweroff_force = true;
	schedule_work(&poweroff_work);
}
EXPORT_SYMBOL_GPL(orderly_poweroff);

static void reboot_work_func(struct work_struct *work)
{
	__orderly_reboot();
}

static DECLARE_WORK(reboot_work, reboot_work_func);

/**
 * orderly_reboot - Trigger an orderly system reboot
 *
 * This may be called from any context to trigger a system reboot.
 * If the orderly reboot fails, it will force an immediate reboot.
 */
void orderly_reboot(void)
{
	schedule_work(&reboot_work);
}
EXPORT_SYMBOL_GPL(orderly_reboot);
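
/*
 * Illustrative sketch (not part of this file): a driver that detects a
 * condition requiring shutdown can request an orderly power-off from any
 * context; passing true forces the power-off if userspace fails to act.
 * The names are hypothetical.
 *
 *	static irqreturn_t mysensor_alert_irq(int irq, void *dev_id)
 *	{
 *		pr_crit("mysensor: critical condition, shutting down\n");
 *		orderly_poweroff(true);
 *		return IRQ_HANDLED;
 *	}
 */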

/**
 * hw_failure_emergency_poweroff_func - emergency poweroff work after a known delay
 * @work: work_struct associated with the emergency poweroff function
 *
 * This function is called in very critical situations to force
 * a kernel poweroff after a configurable timeout value.
 */
static void hw_failure_emergency_poweroff_func(struct work_struct *work)
{
	/*
	 * We have reached here after the emergency shutdown waiting period has
	 * expired. This means orderly_poweroff has not been able to shut off
	 * the system for some reason.
	 *
	 * Try to shut down the system immediately using kernel_power_off
	 * if populated.
	 */
	pr_emerg("Hardware protection timed out. Trying forced poweroff\n");
	kernel_power_off();

	/*
	 * Worst of the worst case, trigger emergency restart.
	 */
	pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n");
	emergency_restart();
}

static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work,
			    hw_failure_emergency_poweroff_func);

/**
 * hw_failure_emergency_poweroff - Trigger an emergency system poweroff
 *
 * This may be called from any critical situation to trigger a system shutdown
 * after a given period of time. If the delay is zero or negative, no shutdown
 * is scheduled.
 */
static void hw_failure_emergency_poweroff(int poweroff_delay_ms)
{
	if (poweroff_delay_ms <= 0)
		return;
	schedule_delayed_work(&hw_failure_emergency_poweroff_work,
			      msecs_to_jiffies(poweroff_delay_ms));
}

/**
 * hw_protection_shutdown - Trigger an emergency system poweroff
 *
 * @reason:		Reason of emergency shutdown to be printed.
 * @ms_until_forced:	Time to wait for orderly shutdown before triggering a
 *			forced shutdown. Negative value disables the forced
 *			shutdown.
 *
 * Initiate an emergency system shutdown in order to protect hardware from
 * further damage. Usage examples include thermal protection or voltage/current
 * regulator failures.
 * NOTE: The request is ignored if protection shutdown is already pending even
 * if the previous request has given a large timeout for forced shutdown.
 * Can be called from any context.
 */
void hw_protection_shutdown(const char *reason, int ms_until_forced)
{
	static atomic_t allow_proceed = ATOMIC_INIT(1);

	pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason);

	/* Shutdown should be initiated only once. */
	if (!atomic_dec_and_test(&allow_proceed))
		return;

	/*
	 * Queue a backup emergency shutdown in the event of
	 * orderly_poweroff failure.
	 */
	hw_failure_emergency_poweroff(ms_until_forced);
	orderly_poweroff(true);
}
EXPORT_SYMBOL_GPL(hw_protection_shutdown);
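
/*
 * Illustrative sketch (not part of this file): a thermal or regulator driver
 * that detects imminent hardware damage gives userspace a short window for an
 * orderly shutdown and falls back to a forced power-off. The variable names
 * and the 3000 ms timeout are hypothetical.
 *
 *	if (temp_mC > critical_temp_mC)
 *		hw_protection_shutdown("Overtemperature", 3000);
 */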

static int __init reboot_setup(char *str)
{
	for (;;) {
		enum reboot_mode *mode;

		/*
		 * Having anything passed on the command line via
		 * reboot= will cause us to disable DMI checking
		 * below.
		 */
		reboot_default = 0;

		if (!strncmp(str, "panic_", 6)) {
			mode = &panic_reboot_mode;
			str += 6;
		} else {
			mode = &reboot_mode;
		}

		switch (*str) {
		case 'w':
			*mode = REBOOT_WARM;
			break;

		case 'c':
			*mode = REBOOT_COLD;
			break;

		case 'h':
			*mode = REBOOT_HARD;
			break;

		case 's':
			/*
			 * reboot_cpu is s[mp]#### with #### being the processor
			 * to be used for rebooting. Skip 's' or 'smp' prefix.
			 */
			str += str[1] == 'm' && str[2] == 'p' ? 3 : 1;

			if (isdigit(str[0])) {
				int cpu = simple_strtoul(str, NULL, 0);

				if (cpu >= num_possible_cpus()) {
					pr_err("Ignoring the CPU number in reboot= option. "
					       "CPU %d exceeds possible cpu number %d\n",
					       cpu, num_possible_cpus());
					break;
				}
				reboot_cpu = cpu;
			} else
				*mode = REBOOT_SOFT;
			break;

		case 'g':
			*mode = REBOOT_GPIO;
			break;

		case 'b':
		case 'a':
		case 'k':
		case 't':
		case 'e':
		case 'p':
			reboot_type = *str;
			break;

		case 'f':
			reboot_force = 1;
			break;
		}

		str = strchr(str, ',');
		if (str)
			str++;
		else
			break;
	}
	return 1;
}
__setup("reboot=", reboot_setup);
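
/*
 * For reference, the parser above accepts a comma-separated list where each
 * token is matched on its first letter. A few illustrative (hypothetical)
 * command lines:
 *
 *	reboot=w		warm reboot mode
 *	reboot=cold		cold reboot mode (only the leading 'c' matters)
 *	reboot=a,f		ACPI reboot type, forced
 *	reboot=s4		perform the reboot on CPU 4
 *	reboot=panic_warm	warm reboot mode after a panic
 */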

#ifdef CONFIG_SYSFS

#define REBOOT_COLD_STR		"cold"
#define REBOOT_WARM_STR		"warm"
#define REBOOT_HARD_STR		"hard"
#define REBOOT_SOFT_STR		"soft"
#define REBOOT_GPIO_STR		"gpio"
#define REBOOT_UNDEFINED_STR	"undefined"

#define BOOT_TRIPLE_STR		"triple"
#define BOOT_KBD_STR		"kbd"
#define BOOT_BIOS_STR		"bios"
#define BOOT_ACPI_STR		"acpi"
#define BOOT_EFI_STR		"efi"
#define BOOT_PCI_STR		"pci"

static ssize_t mode_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	const char *val;

	switch (reboot_mode) {
	case REBOOT_COLD:
		val = REBOOT_COLD_STR;
		break;
	case REBOOT_WARM:
		val = REBOOT_WARM_STR;
		break;
	case REBOOT_HARD:
		val = REBOOT_HARD_STR;
		break;
	case REBOOT_SOFT:
		val = REBOOT_SOFT_STR;
		break;
	case REBOOT_GPIO:
		val = REBOOT_GPIO_STR;
		break;
	default:
		val = REBOOT_UNDEFINED_STR;
	}

	return sprintf(buf, "%s\n", val);
}
static ssize_t mode_store(struct kobject *kobj, struct kobj_attribute *attr,
			  const char *buf, size_t count)
{
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	if (!strncmp(buf, REBOOT_COLD_STR, strlen(REBOOT_COLD_STR)))
		reboot_mode = REBOOT_COLD;
	else if (!strncmp(buf, REBOOT_WARM_STR, strlen(REBOOT_WARM_STR)))
		reboot_mode = REBOOT_WARM;
	else if (!strncmp(buf, REBOOT_HARD_STR, strlen(REBOOT_HARD_STR)))
		reboot_mode = REBOOT_HARD;
	else if (!strncmp(buf, REBOOT_SOFT_STR, strlen(REBOOT_SOFT_STR)))
		reboot_mode = REBOOT_SOFT;
	else if (!strncmp(buf, REBOOT_GPIO_STR, strlen(REBOOT_GPIO_STR)))
		reboot_mode = REBOOT_GPIO;
	else
		return -EINVAL;

	reboot_default = 0;

	return count;
}
static struct kobj_attribute reboot_mode_attr = __ATTR_RW(mode);

#ifdef CONFIG_X86
static ssize_t force_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", reboot_force);
}
static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	bool res;

	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	if (kstrtobool(buf, &res))
		return -EINVAL;

	reboot_default = 0;
	reboot_force = res;

	return count;
}
static struct kobj_attribute reboot_force_attr = __ATTR_RW(force);

static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	const char *val;

	switch (reboot_type) {
	case BOOT_TRIPLE:
		val = BOOT_TRIPLE_STR;
		break;
	case BOOT_KBD:
		val = BOOT_KBD_STR;
		break;
	case BOOT_BIOS:
		val = BOOT_BIOS_STR;
		break;
	case BOOT_ACPI:
		val = BOOT_ACPI_STR;
		break;
	case BOOT_EFI:
		val = BOOT_EFI_STR;
		break;
	case BOOT_CF9_FORCE:
		val = BOOT_PCI_STR;
		break;
	default:
		val = REBOOT_UNDEFINED_STR;
	}

	return sprintf(buf, "%s\n", val);
}
static ssize_t type_store(struct kobject *kobj, struct kobj_attribute *attr,
			  const char *buf, size_t count)
{
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	if (!strncmp(buf, BOOT_TRIPLE_STR, strlen(BOOT_TRIPLE_STR)))
		reboot_type = BOOT_TRIPLE;
	else if (!strncmp(buf, BOOT_KBD_STR, strlen(BOOT_KBD_STR)))
		reboot_type = BOOT_KBD;
	else if (!strncmp(buf, BOOT_BIOS_STR, strlen(BOOT_BIOS_STR)))
		reboot_type = BOOT_BIOS;
	else if (!strncmp(buf, BOOT_ACPI_STR, strlen(BOOT_ACPI_STR)))
		reboot_type = BOOT_ACPI;
	else if (!strncmp(buf, BOOT_EFI_STR, strlen(BOOT_EFI_STR)))
		reboot_type = BOOT_EFI;
	else if (!strncmp(buf, BOOT_PCI_STR, strlen(BOOT_PCI_STR)))
		reboot_type = BOOT_CF9_FORCE;
	else
		return -EINVAL;

	reboot_default = 0;

	return count;
}
static struct kobj_attribute reboot_type_attr = __ATTR_RW(type);
#endif

#ifdef CONFIG_SMP
static ssize_t cpu_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", reboot_cpu);
}
static ssize_t cpu_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	unsigned int cpunum;
	int rc;

	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	rc = kstrtouint(buf, 0, &cpunum);

	if (rc)
		return rc;

	if (cpunum >= num_possible_cpus())
		return -ERANGE;

	reboot_default = 0;
	reboot_cpu = cpunum;

	return count;
}
static struct kobj_attribute reboot_cpu_attr = __ATTR_RW(cpu);
#endif

static struct attribute *reboot_attrs[] = {
	&reboot_mode_attr.attr,
#ifdef CONFIG_X86
	&reboot_force_attr.attr,
	&reboot_type_attr.attr,
#endif
#ifdef CONFIG_SMP
	&reboot_cpu_attr.attr,
#endif
	NULL,
};

static const struct attribute_group reboot_attr_group = {
	.attrs = reboot_attrs,
};

static int __init reboot_ksysfs_init(void)
{
	struct kobject *reboot_kobj;
	int ret;

	reboot_kobj = kobject_create_and_add("reboot", kernel_kobj);
	if (!reboot_kobj)
		return -ENOMEM;

	ret = sysfs_create_group(reboot_kobj, &reboot_attr_group);
	if (ret) {
		kobject_put(reboot_kobj);
		return ret;
	}

	return 0;
}
late_initcall(reboot_ksysfs_init);

#endif