#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H

#include <linux/sched.h>
#include <linux/percpu.h>
#include <asm/ptrace.h>

struct context_tracking {
	/*
	 * When active is false, probes are unset in order
	 * to minimize overhead: TIF flags are cleared
	 * and calls to user_enter/exit are ignored. This
	 * may be further optimized using static keys.
	 */
	bool active;
	enum ctx_state {
		IN_KERNEL = 0,
		IN_USER,
	} state;
};

#ifdef CONFIG_CONTEXT_TRACKING
DECLARE_PER_CPU(struct context_tracking, context_tracking);

static inline bool context_tracking_in_user(void)
{
	return __this_cpu_read(context_tracking.state) == IN_USER;
}

static inline bool context_tracking_active(void)
{
	return __this_cpu_read(context_tracking.active);
}

extern void user_enter(void);
extern void user_exit(void);

static inline enum ctx_state exception_enter(void)
{
	enum ctx_state prev_ctx;

	prev_ctx = this_cpu_read(context_tracking.state);
	user_exit();

	return prev_ctx;
}

static inline void exception_exit(enum ctx_state prev_ctx)
{
	if (prev_ctx == IN_USER)
		user_enter();
}

extern void context_tracking_task_switch(struct task_struct *prev,
					 struct task_struct *next);
#else
static inline bool context_tracking_in_user(void) { return false; }
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline void context_tracking_task_switch(struct task_struct *prev,
						struct task_struct *next) { }
#endif /* !CONFIG_CONTEXT_TRACKING */

#endif
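
/*
 * Usage sketch (illustrative only, not part of the upstream header): a
 * typical architecture exception handler brackets its work with
 * exception_enter()/exception_exit() so that the context tracking state
 * seen on entry (IN_USER or IN_KERNEL) is restored on exit.  The handler
 * name below is hypothetical and the block is compiled out with #if 0 so
 * it does not affect users of this header.
 */
#if 0
static void example_exception_handler(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	/* Record the tracked state at entry and switch tracking to kernel mode. */
	prev_state = exception_enter();

	/* ... handle the exception ... */

	/* Restore IN_USER tracking if that is where the exception came from. */
	exception_exit(prev_state);
}
#endif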