/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H

#include <linux/sched.h>
#include <linux/vtime.h>
#include <linux/context_tracking_state.h>
#include <linux/instrumentation.h>

#include <asm/ptrace.h>


#ifdef CONFIG_CONTEXT_TRACKING
extern void context_tracking_cpu_set(int cpu);

/* Called with interrupts disabled. */
extern void __context_tracking_enter(enum ctx_state state);
extern void __context_tracking_exit(enum ctx_state state);

extern void context_tracking_enter(enum ctx_state state);
extern void context_tracking_exit(enum ctx_state state);
extern void context_tracking_user_enter(void);
extern void context_tracking_user_exit(void);

static inline void user_enter(void)
{
	if (context_tracking_enabled())
		context_tracking_enter(CONTEXT_USER);
}

static inline void user_exit(void)
{
	if (context_tracking_enabled())
		context_tracking_exit(CONTEXT_USER);
}

/* Called with interrupts disabled. */
static __always_inline void user_enter_irqoff(void)
{
	if (context_tracking_enabled())
		__context_tracking_enter(CONTEXT_USER);
}

static __always_inline void user_exit_irqoff(void)
{
	if (context_tracking_enabled())
		__context_tracking_exit(CONTEXT_USER);
}

static inline enum ctx_state exception_enter(void)
{
	enum ctx_state prev_ctx;

	if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) ||
	    !context_tracking_enabled())
		return 0;

	prev_ctx = this_cpu_read(context_tracking.state);
	if (prev_ctx != CONTEXT_KERNEL)
		context_tracking_exit(prev_ctx);

	return prev_ctx;
}

static inline void exception_exit(enum ctx_state prev_ctx)
{
	if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) &&
	    context_tracking_enabled()) {
		if (prev_ctx != CONTEXT_KERNEL)
			context_tracking_enter(prev_ctx);
	}
}

/* Returns true if context tracking is enabled on this CPU. */
static __always_inline bool context_tracking_guest_enter(void)
{
	if (context_tracking_enabled())
		__context_tracking_enter(CONTEXT_GUEST);

	return context_tracking_enabled_this_cpu();
}

static __always_inline void context_tracking_guest_exit(void)
{
	if (context_tracking_enabled())
		__context_tracking_exit(CONTEXT_GUEST);
}

/**
 * ct_state() - return the current context tracking state if known
 *
 * Returns the current CPU's context tracking state if context tracking
 * is enabled. If context tracking is disabled, returns
 * CONTEXT_DISABLED. This should be used primarily for debugging.
 */
static __always_inline enum ctx_state ct_state(void)
{
	return context_tracking_enabled() ?
		this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
}
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
static inline bool context_tracking_guest_enter(void) { return false; }
static inline void context_tracking_guest_exit(void) { }

#endif /* !CONFIG_CONTEXT_TRACKING */

#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))

#ifdef CONFIG_CONTEXT_TRACKING_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
#endif /* CONFIG_CONTEXT_TRACKING_FORCE */

/* Must be called with irqs disabled. */
static __always_inline void guest_enter_irqoff(void)
{
	/*
	 * This is running in ioctl context, so it's safe to assume that it's
	 * the stime pending cputime to flush.
	 */
	instrumentation_begin();
	vtime_account_guest_enter();
	instrumentation_end();

	/*
	 * KVM does not hold any references to RCU-protected data when it
	 * switches the CPU into guest mode. In fact, switching to guest mode
	 * is very similar to exiting to userspace from RCU's point of view.
	 * In addition, the CPU may stay in guest mode for quite a long time
	 * (up to one time slice). Let's treat guest mode as a quiescent state,
	 * just like we do with user-mode execution.
	 */
	if (!context_tracking_guest_enter()) {
		instrumentation_begin();
		rcu_virt_note_context_switch(smp_processor_id());
		instrumentation_end();
	}
}

static __always_inline void guest_exit_irqoff(void)
{
	context_tracking_guest_exit();

	instrumentation_begin();
	/* Flush the cputime we spent on the guest */
	vtime_account_guest_exit();
	instrumentation_end();
}

static inline void guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit_irqoff();
	local_irq_restore(flags);
}

#endif
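
/*
 * Usage sketch (illustrative only, not part of this header): on an
 * architecture without CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK, an exception
 * handler reached from an unknown context is typically bracketed with
 * exception_enter()/exception_exit(); do_hypothetical_fault() below is a
 * made-up handler name, not a real kernel function.
 *
 *	enum ctx_state prev_state;
 *
 *	prev_state = exception_enter();
 *	do_hypothetical_fault(regs);
 *	exception_exit(prev_state);
 *
 * Similarly, a hypervisor's vcpu run loop calls guest_enter_irqoff() with
 * interrupts disabled just before entering the guest and guest_exit_irqoff()
 * (or guest_exit() when interrupts may still be enabled) right after leaving
 * it, so guest cputime is accounted and RCU can treat guest mode as a
 * quiescent state.
 */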