/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_TRACE_RECURSION_H
#define _LINUX_TRACE_RECURSION_H

#include <linux/interrupt.h>
#include <linux/sched.h>

#ifdef CONFIG_TRACING

/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are taken:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback,
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	/* Function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than FTRACE_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_BRANCH_BIT,
	/*
	 * Abuse of the trace_recursion.
	 * We need a way to maintain state if we are tracing the function
	 * graph in irq context, because we want to trace a particular function
	 * that was called in irq context while we have irq tracing off. Since
	 * this can only be modified by current, we can reuse trace_recursion.
	 */
	TRACE_IRQ_BIT,

	/* Set if the function is in the set_graph_function file */
	TRACE_GRAPH_BIT,

	/*
	 * In the very unlikely case that an interrupt came in
	 * at the start of graph tracing, and we want to trace
	 * the function in that interrupt, the depth can be greater
	 * than zero, because of the preempted start of a previous
	 * trace. In an even more unlikely case, depth could be 2
	 * if a softirq interrupted the start of graph tracing,
	 * followed by an interrupt preempting a start of graph
	 * tracing in the softirq, and depth can even be 3
	 * if an NMI came in at the start of an interrupt function
	 * that preempted a softirq start of a function that
	 * preempted normal context!!!! Luckily, it can't be
	 * greater than 3, so the next two bits are a mask
	 * of what the depth is when we set TRACE_GRAPH_BIT.
	 */

	TRACE_GRAPH_DEPTH_START_BIT,
	TRACE_GRAPH_DEPTH_END_BIT,

	/*
	 * To implement set_graph_notrace, if this bit is set, we ignore
	 * function graph tracing of called functions, until the return
	 * function is called to clear it.
	 */
	TRACE_GRAPH_NOTRACE_BIT,

	/*
	 * When transitioning between contexts, the preempt_count() may
	 * not be correct. Allow for a single recursion to cover this case.
	 */
	TRACE_TRANSITION_BIT,

	/* Used to prevent recursion recording from recursing. */
	TRACE_RECORD_RECURSION_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define trace_recursion_depth() \
	(((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
#define trace_recursion_set_depth(depth) \
	do {								\
		current->trace_recursion &=				\
			~(3 << TRACE_GRAPH_DEPTH_START_BIT);		\
		current->trace_recursion |=				\
			((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT;	\
	} while (0)

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

/*
 * Used for setting context
 *  NMI     = 0
 *  IRQ     = 1
 *  SOFTIRQ = 2
 *  NORMAL  = 3
 */
enum {
	TRACE_CTX_NMI,
	TRACE_CTX_IRQ,
	TRACE_CTX_SOFTIRQ,
	TRACE_CTX_NORMAL,
};

static __always_inline int trace_get_context_bit(void)
{
	unsigned char bit = interrupt_context_level();

	return TRACE_CTX_NORMAL - bit;
}

#ifdef CONFIG_FTRACE_RECORD_RECURSION
extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);
# define do_ftrace_record_recursion(ip, pip)				\
	do {								\
		if (!trace_recursion_test(TRACE_RECORD_RECURSION_BIT)) { \
			trace_recursion_set(TRACE_RECORD_RECURSION_BIT); \
			ftrace_record_recursion(ip, pip);		\
			trace_recursion_clear(TRACE_RECORD_RECURSION_BIT); \
		}							\
	} while (0)
#else
# define do_ftrace_record_recursion(ip, pip)	do { } while (0)
#endif

static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
							 int start, int max)
{
	unsigned int val = READ_ONCE(current->trace_recursion);
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit))) {
		/*
		 * It could be that preempt_count has not been updated during
		 * a switch between contexts. Allow for a single recursion.
		 */
		bit = TRACE_TRANSITION_BIT;
		if (val & (1 << bit)) {
			do_ftrace_record_recursion(ip, pip);
			return -1;
		}
	} else {
		/* Normal check passed, clear the transition to allow it again */
		val &= ~(1 << TRACE_TRANSITION_BIT);
	}

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit + 1;
}

static __always_inline void trace_clear_recursion(int bit)
{
	if (!bit)
		return;

	barrier();
	bit--;
	trace_recursion_clear(bit);
}

/**
 * ftrace_test_recursion_trylock - tests for recursion in same context
 * @ip: The instruction pointer of the function being traced
 * @parent_ip: The instruction pointer of the parent of the traced function
 *
 * Use this for ftrace callbacks. This will detect if the function
 * tracing recursed in the same context (normal vs interrupt).
 *
 * Returns: -1 if a recursion happened.
 *          >= 0 if no recursion.
 */
static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
							  unsigned long parent_ip)
{
	return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START, TRACE_FTRACE_MAX);
}

/**
 * ftrace_test_recursion_unlock - called when function callback is complete
 * @bit: The return of a successful ftrace_test_recursion_trylock()
 *
 * This is used at the end of a ftrace callback.
 */
static __always_inline void ftrace_test_recursion_unlock(int bit)
{
	trace_clear_recursion(bit);
}

#endif /* CONFIG_TRACING */
#endif /* _LINUX_TRACE_RECURSION_H */
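
/*
 * Illustrative usage sketch (not part of this header): a function trace
 * callback is expected to guard its body with ftrace_test_recursion_trylock()
 * and ftrace_test_recursion_unlock(), so it bails out instead of recursing
 * when its own work triggers function tracing again. The callback name and
 * the work it performs are hypothetical; the signature assumes the
 * ftrace_func_t prototype that receives a struct ftrace_regs pointer.
 */
#if 0	/* example only, not compiled as part of this header */
#include <linux/ftrace.h>
#include <linux/trace_recursion.h>

static void my_callback_func(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	int bit;

	/* Returns a negative value if this context already holds its bit */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	/* ... the callback's actual tracing work goes here ... */

	/* Release the recursion protection taken by the trylock above */
	ftrace_test_recursion_unlock(bit);
}
#endif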