/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_ENTRYCOMMON_H
#define __LINUX_ENTRYCOMMON_H

#include <linux/static_call_types.h>
#include <linux/ptrace.h>
#include <linux/syscalls.h>
#include <linux/seccomp.h>
#include <linux/sched.h>
#include <linux/context_tracking.h>
#include <linux/livepatch.h>
#include <linux/resume_user_mode.h>
#include <linux/tick.h>
#include <linux/kmsan.h>

#include <asm/entry-common.h>

/*
 * Define dummy _TIF work flags if not defined by the architecture or for
 * disabled functionality.
 */
#ifndef _TIF_PATCH_PENDING
# define _TIF_PATCH_PENDING		(0)
#endif

#ifndef _TIF_UPROBE
# define _TIF_UPROBE			(0)
#endif

/*
 * SYSCALL_WORK flags handled in syscall_enter_from_user_mode()
 */
#ifndef ARCH_SYSCALL_WORK_ENTER
# define ARCH_SYSCALL_WORK_ENTER	(0)
#endif

/*
 * SYSCALL_WORK flags handled in syscall_exit_to_user_mode()
 */
#ifndef ARCH_SYSCALL_WORK_EXIT
# define ARCH_SYSCALL_WORK_EXIT		(0)
#endif

#define SYSCALL_WORK_ENTER	(SYSCALL_WORK_SECCOMP |			\
				 SYSCALL_WORK_SYSCALL_TRACEPOINT |	\
				 SYSCALL_WORK_SYSCALL_TRACE |		\
				 SYSCALL_WORK_SYSCALL_EMU |		\
				 SYSCALL_WORK_SYSCALL_AUDIT |		\
				 SYSCALL_WORK_SYSCALL_USER_DISPATCH |	\
				 ARCH_SYSCALL_WORK_ENTER)
#define SYSCALL_WORK_EXIT	(SYSCALL_WORK_SYSCALL_TRACEPOINT |	\
				 SYSCALL_WORK_SYSCALL_TRACE |		\
				 SYSCALL_WORK_SYSCALL_AUDIT |		\
				 SYSCALL_WORK_SYSCALL_USER_DISPATCH |	\
				 SYSCALL_WORK_SYSCALL_EXIT_TRAP |	\
				 ARCH_SYSCALL_WORK_EXIT)

/*
 * TIF flags handled in exit_to_user_mode_loop()
 */
#ifndef ARCH_EXIT_TO_USER_MODE_WORK
# define ARCH_EXIT_TO_USER_MODE_WORK	(0)
#endif

#define EXIT_TO_USER_MODE_WORK						\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |		\
	 _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL |	\
	 ARCH_EXIT_TO_USER_MODE_WORK)

/**
 * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
 * @regs:	Pointer to current's pt_regs
 *
 * Defaults to an empty implementation. Can be replaced by architecture
 * specific code.
 *
 * Invoked from syscall_enter_from_user_mode() in the non-instrumentable
 * section. Use __always_inline so the compiler cannot push it out of line
 * and make it instrumentable.
 */
static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs);

#ifndef arch_enter_from_user_mode
static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
#endif

/**
 * enter_from_user_mode - Establish state when coming from user mode
 * @regs:	Pointer to current's pt_regs
 *
 * Syscall/interrupt entry disables interrupts, but user mode is traced as
 * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct and interrupts are still
 * disabled. The subsequent functions can be instrumented.
 *
 * This is invoked when there is architecture specific functionality to be
 * done between establishing state and enabling interrupts. The caller must
 * enable interrupts before invoking syscall_enter_from_user_mode_work().
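 *
 * As an illustration only, not a definitive implementation, an architecture
 * using this split might sequence its syscall entry roughly as follows.
 * The function arch_syscall_entry(), the hook arch_specific_entry_work()
 * and the field regs->syscallno are hypothetical placeholders:
 *
 *	noinstr void arch_syscall_entry(struct pt_regs *regs)
 *	{
 *		long nr;
 *
 *		enter_from_user_mode(regs);
 *
 *		instrumentation_begin();
 *		arch_specific_entry_work(regs);	// hypothetical arch hook
 *		local_irq_enable();
 *		nr = syscall_enter_from_user_mode_work(regs, regs->syscallno);
 *		instrumentation_end();
 *		...
 *	}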
 */
static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	arch_enter_from_user_mode(regs);
	lockdep_hardirqs_off(CALLER_ADDR0);

	CT_WARN_ON(__ct_state() != CONTEXT_USER);
	user_exit_irqoff();

	instrumentation_begin();
	kmsan_unpoison_entry_regs(regs);
	trace_hardirqs_off_finish();
	instrumentation_end();
}

/**
 * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
 * @regs:	Pointer to current's pt_regs
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct, interrupts are enabled and the
 * subsequent functions can be instrumented.
 *
 * This handles lockdep, RCU (context tracking) and tracing state, i.e.
 * the functionality provided by enter_from_user_mode().
 *
 * This is invoked when there is extra architecture specific functionality
 * to be done between establishing state and handling user mode entry work.
 */
void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);

/**
 * syscall_enter_from_user_mode_work - Check and handle work before invoking
 *				       a syscall
 * @regs:	Pointer to current's pt_regs
 * @syscall:	The syscall number
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * enabled after invoking syscall_enter_from_user_mode_prepare() and extra
 * architecture specific work.
 *
 * Returns: The original or a modified syscall number
 *
 * If the returned syscall number is -1 then the syscall should be
 * skipped. In this case the caller may invoke syscall_set_error() or
 * syscall_set_return_value() first. If neither of those are called and -1
 * is returned, then the syscall will fail with ENOSYS.
 *
 * It handles the following work items:
 *
 *  1) syscall_work flag dependent invocations of
 *     ptrace_report_syscall_entry(), __secure_computing(), trace_sys_enter()
 *  2) Invocation of audit_syscall_entry()
 */
long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);

/**
 * syscall_enter_from_user_mode - Establish state and check and handle work
 *				  before invoking a syscall
 * @regs:	Pointer to current's pt_regs
 * @syscall:	The syscall number
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct, interrupts are enabled and the
 * subsequent functions can be instrumented.
 *
 * This is a combination of syscall_enter_from_user_mode_prepare() and
 * syscall_enter_from_user_mode_work().
 *
 * Returns: The original or a modified syscall number. See
 * syscall_enter_from_user_mode_work() for further explanation.
 */
long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);

/**
 * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
 * @ti_work:	Cached TIF flags gathered with interrupts disabled
 *
 * Defaults to local_irq_enable(). Can be supplied by architecture specific
 * code.
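 *
 * A minimal sketch of how an architecture could supply its own variant via
 * its asm/entry-common.h, assuming the usual override convention checked by
 * the #ifndef block below. The flag _TIF_SOME_ARCH_FLAG and the helper
 * arch_prepare_irq_enable() are hypothetical:
 *
 *	static inline void local_irq_enable_exit_to_user(unsigned long ti_work)
 *	{
 *		if (ti_work & _TIF_SOME_ARCH_FLAG)	// hypothetical flag
 *			arch_prepare_irq_enable();	// hypothetical helper
 *		local_irq_enable();
 *	}
 *	#define local_irq_enable_exit_to_user local_irq_enable_exit_to_user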
 */
static inline void local_irq_enable_exit_to_user(unsigned long ti_work);

#ifndef local_irq_enable_exit_to_user
static inline void local_irq_enable_exit_to_user(unsigned long ti_work)
{
	local_irq_enable();
}
#endif

/**
 * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable()
 *
 * Defaults to local_irq_disable(). Can be supplied by architecture specific
 * code.
 */
static inline void local_irq_disable_exit_to_user(void);

#ifndef local_irq_disable_exit_to_user
static inline void local_irq_disable_exit_to_user(void)
{
	local_irq_disable();
}
#endif

/**
 * arch_exit_to_user_mode_work - Architecture specific TIF work for exit
 *				 to user mode.
 * @regs:	Pointer to current's pt_regs
 * @ti_work:	Cached TIF flags gathered with interrupts disabled
 *
 * Invoked from exit_to_user_mode_loop() with interrupts enabled.
 *
 * Defaults to NOOP. Can be supplied by architecture specific code.
 */
static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
					       unsigned long ti_work);

#ifndef arch_exit_to_user_mode_work
static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
					       unsigned long ti_work)
{
}
#endif

/**
 * arch_exit_to_user_mode_prepare - Architecture specific preparation for
 *				    exit to user mode.
 * @regs:	Pointer to current's pt_regs
 * @ti_work:	Cached TIF flags gathered with interrupts disabled
 *
 * Invoked from exit_to_user_mode_prepare() with interrupts disabled as the
 * last function before return. Defaults to NOOP.
 */
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work);

#ifndef arch_exit_to_user_mode_prepare
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work)
{
}
#endif

/**
 * arch_exit_to_user_mode - Architecture specific final work before
 *			    exit to user mode.
 *
 * Invoked from exit_to_user_mode() with interrupts disabled as the last
 * function before return. Defaults to NOOP.
 *
 * This needs to be __always_inline because it is non-instrumentable code
 * invoked after context tracking switched to user mode.
 *
 * An architecture implementation must not do anything complex, no locking
 * etc. The main purpose is for speculation mitigations.
 */
static __always_inline void arch_exit_to_user_mode(void);

#ifndef arch_exit_to_user_mode
static __always_inline void arch_exit_to_user_mode(void) { }
#endif

/**
 * arch_do_signal_or_restart - Architecture specific signal delivery function
 * @regs:	Pointer to current's pt_regs
 *
 * Invoked from exit_to_user_mode_loop().
 */
void arch_do_signal_or_restart(struct pt_regs *regs);

/**
 * exit_to_user_mode_loop - do any pending work before leaving to user space
 * @regs:	Pointer to pt_regs on entry stack
 * @ti_work:	TIF work flags as read by the caller
 */
unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
				     unsigned long ti_work);

/**
 * exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
 * @regs:	Pointer to pt_regs on entry stack
 *
 * 1) check that interrupts are disabled
 * 2) call tick_nohz_user_enter_prepare()
 * 3) call exit_to_user_mode_loop() if any flags from
 *    EXIT_TO_USER_MODE_WORK are set
 * 4) check that interrupts are still disabled
 */
static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long ti_work;

	lockdep_assert_irqs_disabled();

	/* Flush pending rcuog wakeup before the last need_resched() check */
	tick_nohz_user_enter_prepare();

	ti_work = read_thread_flags();
	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
		ti_work = exit_to_user_mode_loop(regs, ti_work);

	arch_exit_to_user_mode_prepare(regs, ti_work);

	/* Ensure that kernel state is sane for a return to userspace */
	kmap_assert_nomap();
	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();
}

/**
 * exit_to_user_mode - Fixup state when exiting to user mode
 *
 * Syscall/interrupt exit enables interrupts, but the kernel state is
 * interrupts disabled when this is invoked. Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Invoke architecture specific last minute exit code, e.g. speculation
 *    mitigations, etc.: arch_exit_to_user_mode()
 * 4) Tell lockdep that interrupts are enabled
 *
 * Invoked from architecture specific code when syscall_exit_to_user_mode()
 * is not suitable as the last step before returning to userspace. Must be
 * invoked with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke syscall_exit_to_user_mode_work() before this.
 */
static __always_inline void exit_to_user_mode(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	instrumentation_end();

	user_enter_irqoff();
	arch_exit_to_user_mode();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

/**
 * syscall_exit_to_user_mode_work - Handle work before returning to user mode
 * @regs:	Pointer to current's pt_regs
 *
 * Same as step 1 and 2 of syscall_exit_to_user_mode() but without calling
 * exit_to_user_mode() to perform the final transition to user mode.
 *
 * Calling convention is the same as for syscall_exit_to_user_mode() and it
 * returns with all work handled and interrupts disabled. The caller must
 * invoke exit_to_user_mode() before actually switching to user mode to
 * make the final state transitions. Interrupts must stay disabled between
 * return from this function and the invocation of exit_to_user_mode(). An
 * illustrative sketch of this split follows the declaration below.
 */
void syscall_exit_to_user_mode_work(struct pt_regs *regs);

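/*
 * Illustration only, not a definitive implementation: an architecture that
 * needs extra non-instrumentable steps between the exit work and the final
 * transition might split the syscall return path roughly as below. The
 * function arch_syscall_exit() and the hook arch_prepare_return() are
 * hypothetical:
 *
 *	noinstr void arch_syscall_exit(struct pt_regs *regs)
 *	{
 *		instrumentation_begin();
 *		syscall_exit_to_user_mode_work(regs);
 *		instrumentation_end();
 *
 *		arch_prepare_return(regs);	// hypothetical, irqs stay off
 *		exit_to_user_mode();
 *	}
 */
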
/**
 * syscall_exit_to_user_mode - Handle work before returning to user mode
 * @regs:	Pointer to current's pt_regs
 *
 * Invoked with interrupts enabled and fully valid regs. Returns with all
 * work handled, interrupts disabled such that the caller can immediately
 * switch to user mode. Called from architecture specific syscall and ret
 * from fork code.
 *
 * The call order is:
 *  1) One-time syscall exit work:
 *	- rseq syscall exit
 *	- audit
 *	- syscall tracing
 *	- ptrace (single stepping)
 *
 *  2) Preparatory work
 *	- Exit to user mode loop (common TIF handling). Invokes
 *	  arch_exit_to_user_mode_work() for architecture specific TIF work
 *	- Architecture specific one time work arch_exit_to_user_mode_prepare()
 *	- Address limit and lockdep checks
 *
 *  3) Final transition (lockdep, tracing, context tracking, RCU), i.e. the
 *     functionality in exit_to_user_mode().
 *
 * This is a combination of syscall_exit_to_user_mode_work() (1,2) and
 * exit_to_user_mode(). This function is preferred unless there is a
 * compelling architectural reason to use the separate functions.
 */
void syscall_exit_to_user_mode(struct pt_regs *regs);

/**
 * irqentry_enter_from_user_mode - Establish state before invoking the irq handler
 * @regs:	Pointer to current's pt_regs
 *
 * Invoked from architecture specific entry code with interrupts disabled.
 * Can only be called when the interrupt entry came from user mode. The
 * calling code must be non-instrumentable. When the function returns all
 * state is correct and the subsequent functions can be instrumented.
 *
 * The function establishes state (lockdep, RCU (context tracking), tracing).
 */
void irqentry_enter_from_user_mode(struct pt_regs *regs);

/**
 * irqentry_exit_to_user_mode - Interrupt exit work
 * @regs:	Pointer to current's pt_regs
 *
 * Invoked with interrupts disabled and fully valid regs. Returns with all
 * work handled, interrupts disabled such that the caller can immediately
 * switch to user mode. Called from architecture specific interrupt
 * handling code.
 *
 * The call order is #2 and #3 as described in syscall_exit_to_user_mode().
 * Interrupt exit does not invoke #1, which is the syscall specific one-time
 * work.
 */
void irqentry_exit_to_user_mode(struct pt_regs *regs);

#ifndef irqentry_state
/**
 * struct irqentry_state - Opaque object for exception state storage
 * @exit_rcu:	Used exclusively in the irqentry_*() calls; signals whether the
 *		exit path has to invoke ct_irq_exit().
 * @lockdep:	Used exclusively in the irqentry_nmi_*() calls; ensures that
 *		lockdep state is restored correctly on exit from nmi.
 *
 * This opaque object is filled in by the irqentry_*_enter() functions and
 * must be passed back into the corresponding irqentry_*_exit() functions
 * when the exception is complete.
 *
 * Callers of irqentry_*_[enter|exit]() must consider this structure opaque
 * and all members private. Descriptions of the members are provided to aid in
 * the maintenance of the irqentry_*() functions.
 */
typedef struct irqentry_state {
	union {
		bool	exit_rcu;
		bool	lockdep;
	};
} irqentry_state_t;
#endif

/**
 * irqentry_enter - Handle state tracking on ordinary interrupt entries
 * @regs:	Pointer to pt_regs of interrupted context
 *
 * Invokes:
 *  - lockdep irqflag state tracking as low level ASM entry disabled
 *    interrupts.
 *
 *  - Context tracking if the exception hit user mode.
 *
 *  - The hardirq tracer to keep the state consistent as low level ASM
 *    entry disabled interrupts.
 *
 * As a precondition, this requires that the entry came from user mode,
 * idle, or a kernel context in which RCU is watching.
 *
 * For kernel mode entries RCU handling is done conditionally. If RCU is
 * watching then the only RCU requirement is to check whether the tick has
 * to be restarted. If RCU is not watching then ct_irq_enter() has to be
 * invoked on entry and ct_irq_exit() on exit.
 *
 * Avoiding the ct_irq_enter/exit() calls is an optimization but also
 * solves the problem of kernel mode pagefaults which can schedule, which
 * is not possible after invoking ct_irq_enter() without undoing it.
 *
 * For user mode entries irqentry_enter_from_user_mode() is invoked to
 * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
 * would not be possible.
 *
 * Returns: An opaque object that must be passed to irqentry_exit()
 */
irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);

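/*
 * Illustrative sketch, not a definitive implementation: a typical
 * architecture interrupt entry point pairs irqentry_enter() and
 * irqentry_exit() around the instrumentable handler body. The names
 * arch_handle_irq() and generic_handle_arch_irq() stand in for whatever
 * the architecture actually uses:
 *
 *	noinstr void arch_handle_irq(struct pt_regs *regs)
 *	{
 *		irqentry_state_t state = irqentry_enter(regs);
 *
 *		instrumentation_begin();
 *		generic_handle_arch_irq(regs);	// instrumentable handler body
 *		instrumentation_end();
 *
 *		irqentry_exit(regs, state);
 *	}
 */
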
/**
 * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt
 *
 * Conditional reschedule with additional sanity checks.
 */
void raw_irqentry_exit_cond_resched(void);
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#define irqentry_exit_cond_resched_dynamic_enabled	raw_irqentry_exit_cond_resched
#define irqentry_exit_cond_resched_dynamic_disabled	NULL
DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
#define irqentry_exit_cond_resched()	static_call(irqentry_exit_cond_resched)()
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_irqentry_exit_cond_resched(void);
#define irqentry_exit_cond_resched()	dynamic_irqentry_exit_cond_resched()
#endif
#else /* CONFIG_PREEMPT_DYNAMIC */
#define irqentry_exit_cond_resched()	raw_irqentry_exit_cond_resched()
#endif /* CONFIG_PREEMPT_DYNAMIC */

/**
 * irqentry_exit - Handle return from exception that used irqentry_enter()
 * @regs:	Pointer to pt_regs (exception entry regs)
 * @state:	Return value from matching call to irqentry_enter()
 *
 * Depending on the return target (kernel/user) this runs the necessary
 * preemption and work checks if possible and required and returns to
 * the caller with interrupts disabled and no further work pending.
 *
 * This is the last action before returning to the low level ASM code which
 * just needs to return to the appropriate context.
 *
 * Counterpart to irqentry_enter().
 */
void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);

/**
 * irqentry_nmi_enter - Handle NMI entry
 * @regs:	Pointer to current's pt_regs
 *
 * Similar to irqentry_enter() but taking care of the NMI constraints.
 */
irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs);

/**
 * irqentry_nmi_exit - Handle return from NMI handling
 * @regs:	Pointer to pt_regs (NMI entry regs)
 * @irq_state:	Return value from matching call to irqentry_nmi_enter()
 *
 * Last action before returning to the low level assembly code.
 *
 * Counterpart to irqentry_nmi_enter().
 */
void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state);

#endif