/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_TRACE_RECURSION_H
#define _LINUX_TRACE_RECURSION_H

#include <linux/interrupt.h>
#include <linux/sched.h>

#ifdef CONFIG_TRACING

/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are taken:
 *   If the arch does not support an ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback,
 * all three of these steps will perform a recursion check.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by a previous
 * caller, and we can skip the current check.
 */
enum {
	/* Function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than FTRACE_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_BRANCH_BIT,

	/*
	 * Abuse of trace_recursion: we need a way to maintain state
	 * when tracing the function graph in irq context, because we
	 * may want to trace a particular function that was called in
	 * irq context while irq tracing is off. Since this state can
	 * only be modified by current, we can reuse trace_recursion
	 * for it.
	 */
	TRACE_IRQ_BIT,

	/* Set if the function is in the set_graph_function file */
	TRACE_GRAPH_BIT,

	/*
	 * In the very unlikely case that an interrupt came in
	 * at the start of graph tracing, and we want to trace
	 * the function in that interrupt, the depth can be greater
	 * than zero, because of the preempted start of a previous
	 * trace. In an even more unlikely case, the depth could be 2
	 * if a softirq interrupted the start of graph tracing,
	 * followed by an interrupt preempting the start of graph
	 * tracing in the softirq; the depth can even be 3
	 * if an NMI came in at the start of an interrupt function
	 * that preempted a softirq start of a function that
	 * preempted normal context!!!! Luckily, it can't be
	 * greater than 3, so the next two bits are a mask
	 * of what the depth is when we set TRACE_GRAPH_BIT.
	 */
	TRACE_GRAPH_DEPTH_START_BIT,
	TRACE_GRAPH_DEPTH_END_BIT,

	/*
	 * To implement set_graph_notrace, if this bit is set, we ignore
	 * function graph tracing of called functions, until the return
	 * function is called to clear it.
	 */
	TRACE_GRAPH_NOTRACE_BIT,

	/*
	 * When transitioning between contexts, the preempt_count() may
	 * not be correct. Allow for a single recursion to cover this case.
	 */
	TRACE_TRANSITION_BIT,
};
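/*
 * For reference, the enum above yields this bit layout:
 *
 *	bits  0- 3:	TRACE_FTRACE_*_BIT	(one bit per context)
 *	bits  4- 7:	TRACE_INTERNAL_*_BIT	(one bit per context)
 *	bit       8:	TRACE_BRANCH_BIT
 *	bit       9:	TRACE_IRQ_BIT
 *	bit      10:	TRACE_GRAPH_BIT
 *	bits 11-12:	TRACE_GRAPH_DEPTH_START/END_BIT (2-bit depth)
 *	bit      13:	TRACE_GRAPH_NOTRACE_BIT
 *	bit      14:	TRACE_TRANSITION_BIT
 *
 * Worked example of the "skip if a higher bit is set" rule: if
 * TRACE_INTERNAL_IRQ_BIT (bit 6) is already set, then
 * (val & TRACE_CONTEXT_MASK) is at least 0x40, which is greater than
 * TRACE_FTRACE_MAX (0xf), so a trace_test_and_set_recursion() call
 * for the FTRACE bits returns early without doing another check
 * (see the macros and helpers below).
 */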
#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define trace_recursion_depth() \
	(((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
#define trace_recursion_set_depth(depth) \
	do {								\
		current->trace_recursion &=				\
			~(3 << TRACE_GRAPH_DEPTH_START_BIT);		\
		current->trace_recursion |=				\
			((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT;	\
	} while (0)

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

/*
 * Used for setting context:
 *  NMI     = 0
 *  IRQ     = 1
 *  SOFTIRQ = 2
 *  NORMAL  = 3
 */
enum {
	TRACE_CTX_NMI,
	TRACE_CTX_IRQ,
	TRACE_CTX_SOFTIRQ,
	TRACE_CTX_NORMAL,
};

static __always_inline int trace_get_context_bit(void)
{
	unsigned long pc = preempt_count();

	if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
		return TRACE_CTX_NORMAL;
	else
		return pc & NMI_MASK ? TRACE_CTX_NMI :
			pc & HARDIRQ_MASK ? TRACE_CTX_IRQ : TRACE_CTX_SOFTIRQ;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit))) {
		/*
		 * It could be that preempt_count has not been updated during
		 * a switch between contexts. Allow for a single recursion.
		 */
		bit = TRACE_TRANSITION_BIT;
		if (trace_recursion_test(bit))
			return -1;
		trace_recursion_set(bit);
		barrier();
		return bit + 1;
	}

	/* Normal check passed, clear the transition bit to allow it again */
	trace_recursion_clear(TRACE_TRANSITION_BIT);

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit + 1;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit--;
	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}

/**
 * ftrace_test_recursion_trylock - tests for recursion in the same context
 *
 * Use this for ftrace callbacks. This will detect if the function
 * tracing recursed in the same context (normal vs interrupt).
 *
 * Returns: -1 if a recursion happened.
 *          >= 0 if no recursion.
 */
static __always_inline int ftrace_test_recursion_trylock(void)
{
	return trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
}

/**
 * ftrace_test_recursion_unlock - called when function callback is complete
 * @bit: The return of a successful ftrace_test_recursion_trylock()
 *
 * This is used at the end of an ftrace callback.
 */
static __always_inline void ftrace_test_recursion_unlock(int bit)
{
	trace_clear_recursion(bit);
}
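/*
 * Example of the intended usage of the pair above in an ftrace
 * callback. This is only a sketch: my_callback_func() and
 * do_my_trace_work() are made-up names, and the exact callback
 * signature must match the ftrace_ops being registered.
 *
 *	static void my_callback_func(unsigned long ip, unsigned long parent_ip,
 *				     struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		int bit;
 *
 *		bit = ftrace_test_recursion_trylock();
 *		if (bit < 0)
 *			return;		(recursion detected: bail out)
 *
 *		do_my_trace_work(ip, parent_ip);
 *
 *		ftrace_test_recursion_unlock(bit);
 *	}
 */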
#endif /* CONFIG_TRACING */
#endif /* _LINUX_TRACE_RECURSION_H */