/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H

#include <linux/sched.h>
#include <linux/vtime.h>
#include <linux/context_tracking_state.h>
#include <linux/instrumentation.h>

#include <asm/ptrace.h>

#ifdef CONFIG_CONTEXT_TRACKING
extern void context_tracking_cpu_set(int cpu);

/* Called with interrupts disabled. */
extern void __context_tracking_enter(enum ctx_state state);
extern void __context_tracking_exit(enum ctx_state state);

extern void context_tracking_enter(enum ctx_state state);
extern void context_tracking_exit(enum ctx_state state);
extern void context_tracking_user_enter(void);
extern void context_tracking_user_exit(void);

static inline void user_enter(void)
{
	if (context_tracking_enabled())
		context_tracking_enter(CONTEXT_USER);
}
static inline void user_exit(void)
{
	if (context_tracking_enabled())
		context_tracking_exit(CONTEXT_USER);
}

/* Called with interrupts disabled. */
static __always_inline void user_enter_irqoff(void)
{
	if (context_tracking_enabled())
		__context_tracking_enter(CONTEXT_USER);
}
static __always_inline void user_exit_irqoff(void)
{
	if (context_tracking_enabled())
		__context_tracking_exit(CONTEXT_USER);
}

static inline enum ctx_state exception_enter(void)
{
	enum ctx_state prev_ctx;

	if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) ||
	    !context_tracking_enabled())
		return 0;

	prev_ctx = this_cpu_read(context_tracking.state);
	if (prev_ctx != CONTEXT_KERNEL)
		context_tracking_exit(prev_ctx);

	return prev_ctx;
}

static inline void exception_exit(enum ctx_state prev_ctx)
{
	if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) &&
	    context_tracking_enabled()) {
		if (prev_ctx != CONTEXT_KERNEL)
			context_tracking_enter(prev_ctx);
	}
}
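
/*
 * Usage sketch (illustrative only, not part of the upstream header): an
 * architecture that does not select CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK
 * is expected to bracket an exception handler with exception_enter() and
 * exception_exit(), saving and restoring whatever context state was live
 * when the exception hit. The handler name below is hypothetical.
 */
static inline void example_exception_handler(void)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();	/* leave CONTEXT_USER if we came from userspace */
	/* ... handle the exception in CONTEXT_KERNEL ... */
	exception_exit(prev_state);	/* restore the saved context state */
}
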
/**
 * ct_state() - return the current context tracking state if known
 *
 * Returns the current CPU's context tracking state if context tracking
 * is enabled. If context tracking is disabled, CONTEXT_DISABLED is
 * returned. This should be used primarily for debugging.
 */
static __always_inline enum ctx_state ct_state(void)
{
	return context_tracking_enabled() ?
		this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
}
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
#endif /* !CONFIG_CONTEXT_TRACKING */

#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))

#ifdef CONFIG_CONTEXT_TRACKING_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
#endif /* CONFIG_CONTEXT_TRACKING_FORCE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
/* Must be called with IRQs disabled. */
static __always_inline void guest_enter_irqoff(void)
{
	instrumentation_begin();
	if (vtime_accounting_enabled_this_cpu())
		vtime_guest_enter(current);
	else
		current->flags |= PF_VCPU;
	instrumentation_end();

	if (context_tracking_enabled())
		__context_tracking_enter(CONTEXT_GUEST);

	/*
	 * KVM does not hold any references to RCU-protected data when it
	 * switches the CPU into guest mode. In fact, switching to guest mode
	 * is very similar to exiting to userspace from RCU's point of view.
	 * In addition, the CPU may stay in guest mode for quite a long time
	 * (up to one time slice). Let's treat guest mode as a quiescent
	 * state, just like we do with user-mode execution.
	 */
	if (!context_tracking_enabled_this_cpu()) {
		instrumentation_begin();
		rcu_virt_note_context_switch(smp_processor_id());
		instrumentation_end();
	}
}

static __always_inline void guest_exit_irqoff(void)
{
	if (context_tracking_enabled())
		__context_tracking_exit(CONTEXT_GUEST);

	instrumentation_begin();
	if (vtime_accounting_enabled_this_cpu())
		vtime_guest_exit(current);
	else
		current->flags &= ~PF_VCPU;
	instrumentation_end();
}

#else
static __always_inline void guest_enter_irqoff(void)
{
	/*
	 * This is running in ioctl context, so it's safe to assume that
	 * the pending cputime to flush is system time (stime).
	 */
	instrumentation_begin();
	vtime_account_kernel(current);
	current->flags |= PF_VCPU;
	rcu_virt_note_context_switch(smp_processor_id());
	instrumentation_end();
}

static __always_inline void guest_exit_irqoff(void)
{
	instrumentation_begin();
	/* Flush the cputime we spent on the guest. */
	vtime_account_kernel(current);
	current->flags &= ~PF_VCPU;
	instrumentation_end();
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */

static inline void guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit_irqoff();
	local_irq_restore(flags);
}

#endif /* _LINUX_CONTEXT_TRACKING_H */
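
/*
 * Usage sketch (illustrative only, not part of the upstream header): a
 * KVM-style vcpu run path is expected to call the guest helpers with IRQs
 * disabled, wrapping the low-level VM entry. The vmenter call below is
 * hypothetical:
 *
 *	local_irq_disable();
 *	guest_enter_irqoff();			// guest accounting + RCU quiescent state
 *	hypothetical_arch_vmenter(vcpu);	// run the guest until the next VM exit
 *	guest_exit_irqoff();			// flush guest cputime, clear PF_VCPU
 *	local_irq_enable();
 *
 * Callers that cannot guarantee IRQs are already disabled can use the
 * guest_exit() wrapper above, which does local_irq_save()/restore() itself.
 */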