/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_ENTRYCOMMON_H
#define __LINUX_ENTRYCOMMON_H

#include <linux/tracehook.h>
#include <linux/syscalls.h>
#include <linux/seccomp.h>
#include <linux/sched.h>

#include <asm/entry-common.h>

/*
 * Define dummy _TIF work flags if not defined by the architecture or for
 * disabled functionality.
 */
#ifndef _TIF_PATCH_PENDING
# define _TIF_PATCH_PENDING		(0)
#endif

#ifndef _TIF_UPROBE
# define _TIF_UPROBE			(0)
#endif

#ifndef _TIF_NOTIFY_SIGNAL
# define _TIF_NOTIFY_SIGNAL		(0)
#endif

/*
 * SYSCALL_WORK flags handled in syscall_enter_from_user_mode()
 */
#ifndef ARCH_SYSCALL_WORK_ENTER
# define ARCH_SYSCALL_WORK_ENTER	(0)
#endif

/*
 * SYSCALL_WORK flags handled in syscall_exit_to_user_mode()
 */
#ifndef ARCH_SYSCALL_WORK_EXIT
# define ARCH_SYSCALL_WORK_EXIT		(0)
#endif

#define SYSCALL_WORK_ENTER	(SYSCALL_WORK_SECCOMP |			\
				 SYSCALL_WORK_SYSCALL_TRACEPOINT |	\
				 SYSCALL_WORK_SYSCALL_TRACE |		\
				 SYSCALL_WORK_SYSCALL_EMU |		\
				 SYSCALL_WORK_SYSCALL_AUDIT |		\
				 ARCH_SYSCALL_WORK_ENTER)
#define SYSCALL_WORK_EXIT	(SYSCALL_WORK_SYSCALL_TRACEPOINT |	\
				 SYSCALL_WORK_SYSCALL_TRACE |		\
				 SYSCALL_WORK_SYSCALL_AUDIT |		\
				 ARCH_SYSCALL_WORK_EXIT)

/*
 * TIF flags handled in exit_to_user_mode_loop()
 */
#ifndef ARCH_EXIT_TO_USER_MODE_WORK
# define ARCH_EXIT_TO_USER_MODE_WORK	(0)
#endif

#define EXIT_TO_USER_MODE_WORK						\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |		\
	 _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL |	\
	 ARCH_EXIT_TO_USER_MODE_WORK)

/**
 * arch_check_user_regs - Architecture specific sanity check for user mode regs
 * @regs:	Pointer to current's pt_regs
 *
 * Defaults to an empty implementation. Can be replaced by architecture
 * specific code.
 *
 * Invoked from syscall_enter_from_user_mode() in the non-instrumentable
 * section. Use __always_inline so the compiler cannot push it out of line
 * and make it instrumentable.
 */
static __always_inline void arch_check_user_regs(struct pt_regs *regs);

#ifndef arch_check_user_regs
static __always_inline void arch_check_user_regs(struct pt_regs *regs) {}
#endif

/**
 * arch_syscall_enter_tracehook - Wrapper around tracehook_report_syscall_entry()
 * @regs:	Pointer to current's pt_regs
 *
 * Returns: 0 on success or an error code to skip the syscall.
 *
 * Defaults to tracehook_report_syscall_entry(). Can be replaced by
 * architecture specific code.
 *
 * Invoked from syscall_enter_from_user_mode()
 */
static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs);

#ifndef arch_syscall_enter_tracehook
static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs)
{
	return tracehook_report_syscall_entry(regs);
}
#endif
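
/*
 * Illustrative sketch, not part of this header: an architecture overrides
 * one of the hooks above by providing both the function and a matching
 * #define in its asm/entry-common.h, which is included at the top of this
 * file. The body below is hypothetical; real implementations validate
 * architecture specific register state.
 *
 *	static __always_inline void arch_check_user_regs(struct pt_regs *regs)
 *	{
 *		WARN_ON_ONCE(!user_mode(regs));	// hypothetical sanity check
 *	}
 *	#define arch_check_user_regs arch_check_user_regs
 *
 * The #define is what makes the corresponding #ifndef fallback above drop
 * out.
 */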
/**
 * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
 * @regs:	Pointer to current's pt_regs
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct, interrupts are enabled and the
 * subsequent functions can be instrumented.
 *
 * This handles lockdep, RCU (context tracking) and tracing state.
 *
 * This is invoked when there is extra architecture specific functionality
 * to be done between establishing state and handling user mode entry work.
 * A usage sketch is provided further below.
 */
void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);

/**
 * syscall_enter_from_user_mode_work - Check and handle work before invoking
 *				       a syscall
 * @regs:	Pointer to current's pt_regs
 * @syscall:	The syscall number
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * enabled after invoking syscall_enter_from_user_mode_prepare() and extra
 * architecture specific work.
 *
 * Returns: The original or a modified syscall number
 *
 * If the returned syscall number is -1 then the syscall should be
 * skipped. In this case the caller may invoke syscall_set_error() or
 * syscall_set_return_value() first. If neither of those are called and -1
 * is returned, then the syscall will fail with ENOSYS.
 *
 * It handles the following work items:
 *
 *  1) syscall_work flag dependent invocations of
 *     arch_syscall_enter_tracehook(), __secure_computing() and
 *     trace_sys_enter()
 *  2) Invocation of audit_syscall_entry()
 */
long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);

/**
 * syscall_enter_from_user_mode - Establish state and check and handle work
 *				  before invoking a syscall
 * @regs:	Pointer to current's pt_regs
 * @syscall:	The syscall number
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct, interrupts are enabled and the
 * subsequent functions can be instrumented.
 *
 * This is a combination of syscall_enter_from_user_mode_prepare() and
 * syscall_enter_from_user_mode_work().
 *
 * Returns: The original or a modified syscall number. See
 * syscall_enter_from_user_mode_work() for further explanation.
 */
long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);

/**
 * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
 * @ti_work:	Cached TIF flags gathered with interrupts disabled
 *
 * Defaults to local_irq_enable(). Can be supplied by architecture specific
 * code.
 */
static inline void local_irq_enable_exit_to_user(unsigned long ti_work);

#ifndef local_irq_enable_exit_to_user
static inline void local_irq_enable_exit_to_user(unsigned long ti_work)
{
	local_irq_enable();
}
#endif

/**
 * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable()
 *
 * Defaults to local_irq_disable(). Can be supplied by architecture specific
 * code.
 */
static inline void local_irq_disable_exit_to_user(void);

#ifndef local_irq_disable_exit_to_user
static inline void local_irq_disable_exit_to_user(void)
{
	local_irq_disable();
}
#endif
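
/*
 * Illustrative sketch: an architecture with extra entry work to do between
 * establishing state and the common entry work would use the split variants
 * roughly like this. The entry point and the helper marked hypothetical are
 * made up for illustration; real entry points and the exact placement of
 * the instrumentation markers are architecture specific.
 *
 *	__visible noinstr void arch_syscall_entry(struct pt_regs *regs, long nr)
 *	{
 *		syscall_enter_from_user_mode_prepare(regs);
 *		instrumentation_begin();
 *		arch_extra_entry_work(regs);	// hypothetical
 *		nr = syscall_enter_from_user_mode_work(regs, nr);
 *		instrumentation_end();
 *		// ... dispatch the syscall and run the exit path ...
 *	}
 */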
/**
 * arch_exit_to_user_mode_work - Architecture specific TIF work for exit
 *				 to user mode.
 * @regs:	Pointer to current's pt_regs
 * @ti_work:	Cached TIF flags gathered with interrupts disabled
 *
 * Invoked from exit_to_user_mode_loop() with interrupts enabled
 *
 * Defaults to NOOP. Can be supplied by architecture specific code.
 */
static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
					       unsigned long ti_work);

#ifndef arch_exit_to_user_mode_work
static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
					       unsigned long ti_work)
{
}
#endif

/**
 * arch_exit_to_user_mode_prepare - Architecture specific preparation for
 *				    exit to user mode.
 * @regs:	Pointer to current's pt_regs
 * @ti_work:	Cached TIF flags gathered with interrupts disabled
 *
 * Invoked from exit_to_user_mode_prepare() with interrupts disabled as the
 * last function before return. Defaults to NOOP.
 */
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work);

#ifndef arch_exit_to_user_mode_prepare
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work)
{
}
#endif

/**
 * arch_exit_to_user_mode - Architecture specific final work before
 *			    exit to user mode.
 *
 * Invoked from exit_to_user_mode() with interrupts disabled as the last
 * function before return. Defaults to NOOP.
 *
 * This needs to be __always_inline because it is non-instrumentable code
 * invoked after context tracking switched to user mode.
 *
 * An architecture implementation must not do anything complex, no locking
 * etc. The main purpose is for speculation mitigations.
 */
static __always_inline void arch_exit_to_user_mode(void);

#ifndef arch_exit_to_user_mode
static __always_inline void arch_exit_to_user_mode(void) { }
#endif

/**
 * arch_do_signal_or_restart - Architecture specific signal delivery function
 * @regs:	Pointer to current's pt_regs
 * @has_signal:	True if a signal is pending and has to be handled
 *
 * Invoked from exit_to_user_mode_loop().
 */
void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal);

/**
 * arch_syscall_exit_tracehook - Wrapper around tracehook_report_syscall_exit()
 * @regs:	Pointer to current's pt_regs
 * @step:	Indicator for single step
 *
 * Defaults to tracehook_report_syscall_exit(). Can be replaced by
 * architecture specific code.
 *
 * Invoked from syscall_exit_to_user_mode()
 */
static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step);

#ifndef arch_syscall_exit_tracehook
static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step)
{
	tracehook_report_syscall_exit(regs, step);
}
#endif

/**
 * syscall_exit_to_user_mode - Handle work before returning to user mode
 * @regs:	Pointer to current's pt_regs
 *
 * Invoked with interrupts enabled and fully valid regs. Returns with all
 * work handled, interrupts disabled such that the caller can immediately
 * switch to user mode. Called from architecture specific syscall and
 * ret from fork code.
 *
 * The call order is:
 *  1) One-time syscall exit work:
 *	- rseq syscall exit
 *	- audit
 *	- syscall tracing
 *	- tracehook (single stepping)
 *
 *  2) Preparatory work
 *	- Exit to user mode loop (common TIF handling). Invokes
 *	  arch_exit_to_user_mode_work() for architecture specific TIF work
 *	- Architecture specific one time work arch_exit_to_user_mode_prepare()
 *	- Address limit and lockdep checks
 *
 *  3) Final transition (lockdep, tracing, context tracking, RCU). Invokes
 *     arch_exit_to_user_mode() to handle e.g. speculation mitigations
 */
void syscall_exit_to_user_mode(struct pt_regs *regs);
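
/*
 * Illustrative sketch: the common case without extra architecture work uses
 * the combined entry helper paired with syscall_exit_to_user_mode(). The
 * entry point name and the dispatch helper are hypothetical:
 *
 *	__visible noinstr void arch_do_syscall(struct pt_regs *regs, long nr)
 *	{
 *		nr = syscall_enter_from_user_mode(regs, nr);
 *		instrumentation_begin();
 *		if (nr >= 0 && nr < NR_syscalls)
 *			arch_dispatch_syscall(regs, nr);	// hypothetical
 *		else if (nr != -1)
 *			syscall_set_return_value(current, regs, -ENOSYS, 0);
 *		instrumentation_end();
 *		syscall_exit_to_user_mode(regs);
 *	}
 */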
/**
 * irqentry_enter_from_user_mode - Establish state before invoking the irq handler
 * @regs:	Pointer to current's pt_regs
 *
 * Invoked from architecture specific entry code with interrupts disabled.
 * Can only be called when the interrupt entry came from user mode. The
 * calling code must be non-instrumentable. When the function returns all
 * state is correct and the subsequent functions can be instrumented.
 *
 * The function establishes state (lockdep, RCU (context tracking), tracing)
 */
void irqentry_enter_from_user_mode(struct pt_regs *regs);

/**
 * irqentry_exit_to_user_mode - Interrupt exit work
 * @regs:	Pointer to current's pt_regs
 *
 * Invoked with interrupts disabled and fully valid regs. Returns with all
 * work handled, interrupts disabled such that the caller can immediately
 * switch to user mode. Called from architecture specific interrupt
 * handling code.
 *
 * The call order is #2 and #3 as described in syscall_exit_to_user_mode().
 * Interrupt exit does not invoke #1, which is the syscall specific one-time
 * work.
 */
void irqentry_exit_to_user_mode(struct pt_regs *regs);

#ifndef irqentry_state
/**
 * struct irqentry_state - Opaque object for exception state storage
 * @exit_rcu:	Used exclusively in the irqentry_*() calls; signals whether the
 *		exit path has to invoke rcu_irq_exit().
 * @lockdep:	Used exclusively in the irqentry_nmi_*() calls; ensures that
 *		lockdep state is restored correctly on exit from nmi.
 *
 * This opaque object is filled in by the irqentry_*_enter() functions and
 * must be passed back into the corresponding irqentry_*_exit() functions
 * when the exception is complete.
 *
 * Callers of irqentry_*_[enter|exit]() must consider this structure opaque
 * and all members private. Descriptions of the members are provided to aid in
 * the maintenance of the irqentry_*() functions.
 */
typedef struct irqentry_state {
	union {
		bool	exit_rcu;
		bool	lockdep;
	};
} irqentry_state_t;
#endif

/**
 * irqentry_enter - Handle state tracking on ordinary interrupt entries
 * @regs:	Pointer to pt_regs of interrupted context
 *
 * Invokes:
 *  - lockdep irqflag state tracking as low level ASM entry disabled
 *    interrupts.
 *
 *  - Context tracking if the exception hit user mode.
 *
 *  - The hardirq tracer to keep the state consistent as low level ASM
 *    entry disabled interrupts.
 *
 * As a precondition, this requires that the entry came from user mode,
 * idle, or a kernel context in which RCU is watching.
 *
 * For kernel mode entries RCU handling is done conditionally. If RCU is
 * watching then the only RCU requirement is to check whether the tick has
 * to be restarted. If RCU is not watching then rcu_irq_enter() has to be
 * invoked on entry and rcu_irq_exit() on exit.
 *
 * Avoiding the rcu_irq_enter/exit() calls is an optimization but also
 * solves the problem of kernel mode pagefaults which can schedule, which
 * is not possible after invoking rcu_irq_enter() without undoing it.
 *
 * For user mode entries irqentry_enter_from_user_mode() is invoked to
 * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
 * would not be possible.
 *
 * Returns: An opaque object that must be passed to irqentry_exit()
 */
irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
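
/*
 * Illustrative sketch: architecture interrupt entry points pair
 * irqentry_enter() with irqentry_exit(), declared below, and pass the
 * opaque state object through. The handler names are hypothetical:
 *
 *	__visible noinstr void arch_handle_irq(struct pt_regs *regs)
 *	{
 *		irqentry_state_t state = irqentry_enter(regs);
 *
 *		instrumentation_begin();
 *		arch_irq_dispatch(regs);	// hypothetical, instrumentable
 *		instrumentation_end();
 *
 *		irqentry_exit(regs, state);
 *	}
 *
 * NMI entry points use irqentry_nmi_enter() and irqentry_nmi_exit(),
 * declared below, in exactly the same pairing.
 */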
/**
 * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt
 *
 * Conditional reschedule with additional sanity checks.
 */
void irqentry_exit_cond_resched(void);

/**
 * irqentry_exit - Handle return from exception that used irqentry_enter()
 * @regs:	Pointer to pt_regs (exception entry regs)
 * @state:	Return value from matching call to irqentry_enter()
 *
 * Depending on the return target (kernel/user) this runs the necessary
 * preemption and work checks if possible and required and returns to
 * the caller with interrupts disabled and no further work pending.
 *
 * This is the last action before returning to the low level ASM code which
 * just needs to return to the appropriate context.
 *
 * Counterpart to irqentry_enter().
 */
void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);

/**
 * irqentry_nmi_enter - Handle NMI entry
 * @regs:	Pointer to current's pt_regs
 *
 * Similar to irqentry_enter() but taking care of the NMI constraints.
 */
irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs);

/**
 * irqentry_nmi_exit - Handle return from NMI handling
 * @regs:	Pointer to pt_regs (NMI entry regs)
 * @irq_state:	Return value from matching call to irqentry_nmi_enter()
 *
 * Last action before returning to the low level assembly code.
 *
 * Counterpart to irqentry_nmi_enter().
 */
void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state);

#endif