// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/panic.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/debug_locks.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
#include <linux/kgdb.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/vt_kern.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kexec.h>
#include <linux/panic_notifier.h>
#include <linux/sched.h>
#include <linux/string_helpers.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
#include <linux/sysfs.h>
#include <linux/context_tracking.h>
#include <trace/events/error_report.h>
#include <asm/sections.h>

#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18

#ifdef CONFIG_SMP
/*
 * Should we dump all CPUs' backtraces in an oops event?
 * Defaults to 0, can be changed via sysctl.
 */
static unsigned int __read_mostly sysctl_oops_all_cpu_backtrace;
#else
#define sysctl_oops_all_cpu_backtrace 0
#endif /* CONFIG_SMP */

int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
static unsigned long tainted_mask =
	IS_ENABLED(CONFIG_RANDSTRUCT) ? (1 << TAINT_RANDSTRUCT) : 0;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
bool crash_kexec_post_notifiers;
int panic_on_warn __read_mostly;
unsigned long panic_on_taint;
bool panic_on_taint_nousertaint = false;
static unsigned int warn_limit __read_mostly;

int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);

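/*
 * panic_print is a bitmask built from the PANIC_PRINT_* flags below; it
 * selects which extra pieces of system state panic_print_sys_info() dumps
 * when the kernel panics (settable via the "panic_print" core parameter).
 */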
#define PANIC_PRINT_TASK_INFO		0x00000001
#define PANIC_PRINT_MEM_INFO		0x00000002
#define PANIC_PRINT_TIMER_INFO		0x00000004
#define PANIC_PRINT_LOCK_INFO		0x00000008
#define PANIC_PRINT_FTRACE_INFO		0x00000010
#define PANIC_PRINT_ALL_PRINTK_MSG	0x00000020
#define PANIC_PRINT_ALL_CPU_BT		0x00000040
unsigned long panic_print;

ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);

#ifdef CONFIG_SYSCTL
static struct ctl_table kern_panic_table[] = {
#ifdef CONFIG_SMP
	{
		.procname	= "oops_all_cpu_backtrace",
		.data		= &sysctl_oops_all_cpu_backtrace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#endif
	{
		.procname	= "warn_limit",
		.data		= &warn_limit,
		.maxlen		= sizeof(warn_limit),
		.mode		= 0644,
		.proc_handler	= proc_douintvec,
	},
	{ }
};

static __init int kernel_panic_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_panic_table);
	return 0;
}
late_initcall(kernel_panic_sysctls_init);
#endif

static atomic_t warn_count = ATOMIC_INIT(0);

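/*
 * Expose the number of warnings since boot as a read-only sysfs file,
 * /sys/kernel/warn_count.
 */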
#ifdef CONFIG_SYSFS
static ssize_t warn_count_show(struct kobject *kobj, struct kobj_attribute *attr,
			       char *page)
{
	return sysfs_emit(page, "%d\n", atomic_read(&warn_count));
}

static struct kobj_attribute warn_count_attr = __ATTR_RO(warn_count);

static __init int kernel_panic_sysfs_init(void)
{
	sysfs_add_file_to_group(kernel_kobj, &warn_count_attr.attr, NULL);
	return 0;
}
late_initcall(kernel_panic_sysfs_init);
#endif

static long no_blink(int state)
{
	return 0;
}

/* Returns how long it waited in ms */
long (*panic_blink)(int state);
EXPORT_SYMBOL(panic_blink);

/*
 * Stop ourselves in panic -- architecture code may override this
 */
void __weak __noreturn panic_smp_self_stop(void)
{
	while (1)
		cpu_relax();
}

/*
 * Stop ourselves in NMI context if another CPU has already panicked. Arch code
 * may override this to prepare for crash dumping, e.g. save regs info.
 */
void __weak __noreturn nmi_panic_self_stop(struct pt_regs *regs)
{
	panic_smp_self_stop();
}

/*
 * Stop other CPUs in panic. Architecture dependent code may override this
 * with a more suitable version. For example, if the architecture supports
 * crash dump, it should save registers of each stopped CPU and disable
 * per-CPU features such as virtualization extensions.
 */
void __weak crash_smp_send_stop(void)
{
	static int cpus_stopped;

	/*
	 * This function can be called twice in the panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	/*
	 * Note that smp_send_stop() is the usual SMP shutdown function,
	 * which unfortunately means it may not be hardened to work in a
	 * panic situation.
	 */
	smp_send_stop();
	cpus_stopped = 1;
}

atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);

/*
 * A variant of panic() called from NMI context. We return if we've already
 * panicked on this CPU. If another CPU already panicked, loop in
 * nmi_panic_self_stop() which can provide architecture dependent code such
 * as saving register state for crash dump.
 */
void nmi_panic(struct pt_regs *regs, const char *msg)
{
	int old_cpu, this_cpu;

	old_cpu = PANIC_CPU_INVALID;
	this_cpu = raw_smp_processor_id();

	/* atomic_try_cmpxchg updates old_cpu on failure */
	if (atomic_try_cmpxchg(&panic_cpu, &old_cpu, this_cpu))
		panic("%s", msg);
	else if (old_cpu != this_cpu)
		nmi_panic_self_stop(regs);
}
EXPORT_SYMBOL(nmi_panic);

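/*
 * Dump the optional pieces of system state selected in panic_print.
 * panic() calls this twice: first with console_flush == false to print the
 * requested reports, then with console_flush == true (after the consoles
 * have been flushed) to optionally replay the entire log buffer.
 */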
static void panic_print_sys_info(bool console_flush)
{
	if (console_flush) {
		if (panic_print & PANIC_PRINT_ALL_PRINTK_MSG)
			console_flush_on_panic(CONSOLE_REPLAY_ALL);
		return;
	}

	if (panic_print & PANIC_PRINT_TASK_INFO)
		show_state();

	if (panic_print & PANIC_PRINT_MEM_INFO)
		show_mem();

	if (panic_print & PANIC_PRINT_TIMER_INFO)
		sysrq_timer_list_show();

	if (panic_print & PANIC_PRINT_LOCK_INFO)
		debug_show_all_locks();

	if (panic_print & PANIC_PRINT_FTRACE_INFO)
		ftrace_dump(DUMP_ALL);
}

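/*
 * Panic if panic_on_warn is set, or if the number of warnings since boot
 * has reached the kernel.warn_limit sysctl (a limit of 0 means no limit).
 */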
void check_panic_on_warn(const char *origin)
{
	unsigned int limit;

	if (panic_on_warn)
		panic("%s: panic_on_warn set ...\n", origin);

	limit = READ_ONCE(warn_limit);
	if (atomic_inc_return(&warn_count) >= limit && limit)
		panic("%s: system warned too often (kernel.warn_limit is %d)",
		      origin, limit);
}

/*
 * Helper that triggers the NMI backtrace (if requested via panic_print)
 * and then shuts down the secondary CPUs - we cannot gather the NMI
 * backtrace after the CPUs are off!
 */
static void panic_other_cpus_shutdown(bool crash_kexec)
{
	if (panic_print & PANIC_PRINT_ALL_CPU_BT)
		trigger_all_cpu_backtrace();

	/*
	 * Note that smp_send_stop() is the usual SMP shutdown function,
	 * which unfortunately may not be hardened to work in a panic
	 * situation. If we want to do crash dump after notifier calls
	 * and kmsg_dump, we will need architecture dependent extra
	 * bits in addition to stopping other CPUs, hence we rely on
	 * crash_smp_send_stop() for that.
	 */
	if (!crash_kexec)
		smp_send_stop();
	else
		crash_smp_send_stop();
}

/**
 * panic - halt the system
 * @fmt: The text string to print
 *
 * Display a message, then perform cleanups.
 *
 * This function never returns.
 */
void panic(const char *fmt, ...)
{
	static char buf[1024];
	va_list args;
	long i, i_next = 0, len;
	int state = 0;
	int old_cpu, this_cpu;
	bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;

	if (panic_on_warn) {
		/*
		 * This thread may hit another WARN() in the panic path.
		 * Resetting this prevents additional WARN() from panicking the
		 * system on this thread. Other threads are blocked by the
		 * panic_mutex in panic().
		 */
		panic_on_warn = 0;
	}

	/*
	 * Disable local interrupts. This will prevent panic_smp_self_stop
	 * from deadlocking the first CPU that invokes panic(), since
	 * there is nothing to prevent an interrupt handler (that runs
	 * after setting panic_cpu) from invoking panic() again.
	 */
	local_irq_disable();
	preempt_disable_notrace();

	/*
	 * It's possible to come here directly from a panic-assertion and
	 * not have preempt disabled. Some functions called from here want
	 * preempt to be disabled. No point enabling it later though...
	 *
	 * Only one CPU is allowed to execute the panic code from here. For
	 * multiple parallel invocations of panic, all other CPUs either
	 * stop themselves or wait until they are stopped by the 1st CPU
	 * with smp_send_stop().
	 *
	 * cmpxchg success means this is the 1st CPU which comes here,
	 * so go ahead.
	 * `old_cpu == this_cpu' means we came from nmi_panic() which sets
	 * panic_cpu to this CPU. In this case, this is also the 1st CPU.
	 */
	old_cpu = PANIC_CPU_INVALID;
	this_cpu = raw_smp_processor_id();

	/* atomic_try_cmpxchg updates old_cpu on failure */
	if (atomic_try_cmpxchg(&panic_cpu, &old_cpu, this_cpu)) {
		/* go ahead */
	} else if (old_cpu != this_cpu)
		panic_smp_self_stop();

	console_verbose();
	bust_spinlocks(1);
	va_start(args, fmt);
	len = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (len && buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
	/*
	 * Avoid nested stack-dumping if a panic occurs during oops processing
	 */
	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
		dump_stack();
#endif

	/*
	 * If kgdb is enabled, give it a chance to run before we stop all
	 * the other CPUs or else we won't be able to debug processes left
	 * running on them.
	 */
	kgdb_panic(buf);

	/*
	 * If we have crashed and we have a crash kernel loaded let it handle
	 * everything else.
	 * If we want to run this after calling panic_notifiers, pass
	 * the "crash_kexec_post_notifiers" option to the kernel.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (!_crash_kexec_post_notifiers)
		__crash_kexec(NULL);

	panic_other_cpus_shutdown(_crash_kexec_post_notifiers);

	/*
	 * Run any panic handlers, including those that might need to
	 * add information to the kmsg dump output.
	 */
	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

	panic_print_sys_info(false);

	kmsg_dump(KMSG_DUMP_PANIC);

	/*
	 * If you doubt that kdump always works fine in any situation,
	 * "crash_kexec_post_notifiers" offers you a chance to run the
	 * panic notifiers and dump the kmsg before kdump.
	 * Note: since some panic notifiers can make the crashed kernel
	 * more unstable, this can also increase the risk of kdump failure.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (_crash_kexec_post_notifiers)
		__crash_kexec(NULL);

	console_unblank();

	/*
	 * We may have ended up stopping the CPU holding the lock (in
	 * smp_send_stop()) while still having some valuable data in the console
	 * buffer.  Try to acquire the lock then release it regardless of the
	 * result.  The release will also print the buffers out.  Lock debugging
	 * should be disabled to avoid reporting bad unlock balance when
	 * panic() is not being called from an oops.
	 */
	debug_locks_off();
	console_flush_on_panic(CONSOLE_FLUSH_PENDING);

	panic_print_sys_info(true);

	if (!panic_blink)
		panic_blink = no_blink;

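	/*
	 * The busy-wait loops below count milliseconds in PANIC_TIMER_STEP
	 * increments and toggle the panic LED (panic_blink) roughly every
	 * 3600 / PANIC_BLINK_SPD milliseconds while petting the watchdogs.
	 */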
	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked.
		 */
		pr_emerg("Rebooting in %d seconds..\n", panic_timeout);

		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
			touch_nmi_watchdog();
			if (i >= i_next) {
				i += panic_blink(state ^= 1);
				i_next = i + 3600 / PANIC_BLINK_SPD;
			}
			mdelay(PANIC_TIMER_STEP);
		}
	}
	if (panic_timeout != 0) {
		/*
		 * This will not be a clean reboot, with everything
		 * shutting down.  But if there is a chance of
		 * rebooting the system it will be rebooted.
		 */
		if (panic_reboot_mode != REBOOT_UNDEFINED)
			reboot_mode = panic_reboot_mode;
		emergency_restart();
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		pr_emerg("Press Stop-A (L1-A) from sun keyboard or send break\n"
			 "twice on console to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
	disabled_wait();
#endif
	pr_emerg("---[ end Kernel panic - not syncing: %s ]---\n", buf);

	/* Do not scroll important messages printed above */
	suppress_printk = 1;
	local_irq_enable();
	for (i = 0; ; i += PANIC_TIMER_STEP) {
		touch_softlockup_watchdog();
		if (i >= i_next) {
			i += panic_blink(state ^= 1);
			i_next = i + 3600 / PANIC_BLINK_SPD;
		}
		mdelay(PANIC_TIMER_STEP);
	}
}

EXPORT_SYMBOL(panic);

/*
 * TAINT_FORCED_RMMOD could be a per-module flag but the module
 * is being removed anyway.
 */
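/*
 * Each entry below gives the character printed by print_tainted() when the
 * taint bit is set, the character printed when it is clear, and whether the
 * taint also applies to modules.
 */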
const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = {
	[ TAINT_PROPRIETARY_MODULE ]	= { 'P', 'G', true },
	[ TAINT_FORCED_MODULE ]		= { 'F', ' ', true },
	[ TAINT_CPU_OUT_OF_SPEC ]	= { 'S', ' ', false },
	[ TAINT_FORCED_RMMOD ]		= { 'R', ' ', false },
	[ TAINT_MACHINE_CHECK ]		= { 'M', ' ', false },
	[ TAINT_BAD_PAGE ]		= { 'B', ' ', false },
	[ TAINT_USER ]			= { 'U', ' ', false },
	[ TAINT_DIE ]			= { 'D', ' ', false },
	[ TAINT_OVERRIDDEN_ACPI_TABLE ]	= { 'A', ' ', false },
	[ TAINT_WARN ]			= { 'W', ' ', false },
	[ TAINT_CRAP ]			= { 'C', ' ', true },
	[ TAINT_FIRMWARE_WORKAROUND ]	= { 'I', ' ', false },
	[ TAINT_OOT_MODULE ]		= { 'O', ' ', true },
	[ TAINT_UNSIGNED_MODULE ]	= { 'E', ' ', true },
	[ TAINT_SOFTLOCKUP ]		= { 'L', ' ', false },
	[ TAINT_LIVEPATCH ]		= { 'K', ' ', true },
	[ TAINT_AUX ]			= { 'X', ' ', true },
	[ TAINT_RANDSTRUCT ]		= { 'T', ' ', true },
	[ TAINT_TEST ]			= { 'N', ' ', true },
};

/**
 * print_tainted - return a string to represent the kernel taint state.
 *
 * For individual taint flag meanings, see Documentation/admin-guide/sysctl/kernel.rst
 *
 * The string is overwritten by the next call to print_tainted(),
 * but is always NULL terminated.
 */
const char *print_tainted(void)
{
	static char buf[TAINT_FLAGS_COUNT + sizeof("Tainted: ")];

	BUILD_BUG_ON(ARRAY_SIZE(taint_flags) != TAINT_FLAGS_COUNT);

	if (tainted_mask) {
		char *s;
		int i;

		s = buf + sprintf(buf, "Tainted: ");
		for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
			const struct taint_flag *t = &taint_flags[i];
			*s++ = test_bit(i, &tainted_mask) ?
			       t->c_true : t->c_false;
		}
		*s = 0;
	} else
		snprintf(buf, sizeof(buf), "Not tainted");

	return buf;
}

int test_taint(unsigned flag)
{
	return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

unsigned long get_taint(void)
{
	return tainted_mask;
}

/**
 * add_taint: add a taint flag if not already set.
 * @flag: one of the TAINT_* constants.
 * @lockdep_ok: whether lock debugging is still OK.
 *
 * If something bad has gone wrong, you'll want @lockdep_ok = false, but for
 * some noteworthy-but-not-corrupting cases, it can be set to true.
 */
void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
{
	if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off())
		pr_warn("Disabling lock debugging due to kernel taint\n");

	set_bit(flag, &tainted_mask);

	if (tainted_mask & panic_on_taint) {
		panic_on_taint = 0;
		panic("panic_on_taint set ...");
	}
}
EXPORT_SYMBOL(add_taint);

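/* Busy-wait for @msecs milliseconds, touching the NMI watchdog each millisecond. */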
static void spin_msec(int msecs)
{
	int i;

	for (i = 0; i < msecs; i++) {
		touch_nmi_watchdog();
		mdelay(1);
	}
}

/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
	unsigned long flags;
	static int spin_counter;

	if (!pause_on_oops)
		return;

	spin_lock_irqsave(&pause_on_oops_lock, flags);
	if (pause_on_oops_flag == 0) {
		/* This CPU may now print the oops message */
		pause_on_oops_flag = 1;
	} else {
		/* We need to stall this CPU */
		if (!spin_counter) {
			/* This CPU gets to do the counting */
			spin_counter = pause_on_oops;
			do {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(MSEC_PER_SEC);
				spin_lock(&pause_on_oops_lock);
			} while (--spin_counter);
			pause_on_oops_flag = 0;
		} else {
			/* This CPU waits for a different one */
			while (spin_counter) {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(1);
				spin_lock(&pause_on_oops_lock);
			}
		}
	}
	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy..
 */
bool oops_may_print(void)
{
	return pause_on_oops_flag == 0;
}

/*
 * Called when the architecture enters its oops handler, before it prints
 * anything.  If this is the first CPU to oops, and it's oopsing the first
 * time then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option. We do all
 * this to ensure that oopses don't scroll off the screen.  It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
	tracing_off();
	/* can't trust the integrity of the kernel anymore: */
	debug_locks_off();
	do_oops_enter_exit();

	if (sysctl_oops_all_cpu_backtrace)
		trigger_all_cpu_backtrace();
}

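/*
 * Print the end-of-report marker. The trace id printed here is always 0;
 * the line only delimits the end of an oops or warning report.
 */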
static void print_oops_end_marker(void)
{
	pr_warn("---[ end trace %016llx ]---\n", 0ULL);
}

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
	do_oops_enter_exit();
	print_oops_end_marker();
	kmsg_dump(KMSG_DUMP_OOPS);
}

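/*
 * Carries the printf-style format string and arguments from
 * warn_slowpath_fmt() down into __warn().
 */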
struct warn_args {
	const char *fmt;
	va_list args;
};

void __warn(const char *file, int line, void *caller, unsigned taint,
	    struct pt_regs *regs, struct warn_args *args)
{
	disable_trace_on_warning();

	if (file)
		pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
			raw_smp_processor_id(), current->pid, file, line,
			caller);
	else
		pr_warn("WARNING: CPU: %d PID: %d at %pS\n",
			raw_smp_processor_id(), current->pid, caller);

	if (args)
		vprintk(args->fmt, args->args);

	print_modules();

	if (regs)
		show_regs(regs);

	check_panic_on_warn("kernel");

	if (!regs)
		dump_stack();

	print_irqtrace_events(current);

	print_oops_end_marker();
	trace_error_report_end(ERROR_DETECTOR_WARN, (unsigned long)caller);

	/* Just a warning, don't kill lockdep. */
	add_taint(taint, LOCKDEP_STILL_OK);
}

#ifdef CONFIG_BUG
#ifndef __WARN_FLAGS
void warn_slowpath_fmt(const char *file, int line, unsigned taint,
		       const char *fmt, ...)
{
	bool rcu = warn_rcu_enter();
	struct warn_args args;

	pr_warn(CUT_HERE);

	if (!fmt) {
		__warn(file, line, __builtin_return_address(0), taint,
		       NULL, NULL);
		warn_rcu_exit(rcu);
		return;
	}

	args.fmt = fmt;
	va_start(args.args, fmt);
	__warn(file, line, __builtin_return_address(0), taint, NULL, &args);
	va_end(args.args);
	warn_rcu_exit(rcu);
}
EXPORT_SYMBOL(warn_slowpath_fmt);
#else
void __warn_printk(const char *fmt, ...)
{
	bool rcu = warn_rcu_enter();
	va_list args;

	pr_warn(CUT_HERE);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);
	warn_rcu_exit(rcu);
}
EXPORT_SYMBOL(__warn_printk);
#endif

/* Support resetting WARN*_ONCE state */

static int clear_warn_once_set(void *data, u64 val)
{
	generic_bug_clear_once();
	memset(__start_once, 0, __end_once - __start_once);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(clear_warn_once_fops, NULL, clear_warn_once_set,
			 "%lld\n");

static __init int register_warn_debugfs(void)
{
	/* Don't care about failure */
	debugfs_create_file_unsafe("clear_warn_once", 0200, NULL, NULL,
				   &clear_warn_once_fops);
	return 0;
}

device_initcall(register_warn_debugfs);
#endif

#ifdef CONFIG_STACKPROTECTOR

/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
__visible noinstr void __stack_chk_fail(void)
{
	instrumentation_begin();
	panic("stack-protector: Kernel stack is corrupted in: %pB",
	      __builtin_return_address(0));
	instrumentation_end();
}
EXPORT_SYMBOL(__stack_chk_fail);

#endif

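/*
 * Command-line tunables; core_param() also exposes them for runtime
 * adjustment under /sys/module/kernel/parameters/.
 */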
core_param(panic, panic_timeout, int, 0644);
core_param(panic_print, panic_print, ulong, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
core_param(panic_on_warn, panic_on_warn, int, 0644);
core_param(crash_kexec_post_notifiers, crash_kexec_post_notifiers, bool, 0644);

static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);

static int __init panic_on_taint_setup(char *s)
{
	char *taint_str;

	if (!s)
		return -EINVAL;

	taint_str = strsep(&s, ",");
	if (kstrtoul(taint_str, 16, &panic_on_taint))
		return -EINVAL;

	/* make sure panic_on_taint doesn't hold out-of-range TAINT flags */
	panic_on_taint &= TAINT_FLAGS_MAX;

	if (!panic_on_taint)
		return -EINVAL;

	if (s && !strcmp(s, "nousertaint"))
		panic_on_taint_nousertaint = true;

	pr_info("panic_on_taint: bitmask=0x%lx nousertaint_mode=%s\n",
		panic_on_taint, str_enabled_disabled(panic_on_taint_nousertaint));

	return 0;
}
early_param("panic_on_taint", panic_on_taint_setup);