1d864a3caSSteven Rostedt (VMware) // SPDX-License-Identifier: GPL-2.0
2d864a3caSSteven Rostedt (VMware) /*
 * Infrastructure to look into function calls and returns.
4d864a3caSSteven Rostedt (VMware) * Copyright (c) 2008-2009 Frederic Weisbecker <[email protected]>
5d864a3caSSteven Rostedt (VMware) * Mostly borrowed from function tracer which
6d864a3caSSteven Rostedt (VMware) * is Copyright (c) Steven Rostedt <[email protected]>
7d864a3caSSteven Rostedt (VMware) *
8d864a3caSSteven Rostedt (VMware) * Highly modified by Steven Rostedt (VMware).
9d864a3caSSteven Rostedt (VMware) */
107aa1eaefSSteven Rostedt (VMware) #include <linux/bits.h>
1118bfee32SChristophe Leroy #include <linux/jump_label.h>
12e73e679fSSteven Rostedt (VMware) #include <linux/suspend.h>
13d864a3caSSteven Rostedt (VMware) #include <linux/ftrace.h>
14cc60ee81SSteven Rostedt (Google) #include <linux/static_call.h>
15e73e679fSSteven Rostedt (VMware) #include <linux/slab.h>
16d864a3caSSteven Rostedt (VMware)
17e73e679fSSteven Rostedt (VMware) #include <trace/events/sched.h>
18e73e679fSSteven Rostedt (VMware)
19e73e679fSSteven Rostedt (VMware) #include "ftrace_internal.h"
207d8b31b7SArnd Bergmann #include "trace.h"
21e73e679fSSteven Rostedt (VMware)
2242675b72SSteven Rostedt (VMware) /*
2342675b72SSteven Rostedt (VMware) * FGRAPH_FRAME_SIZE: Size in bytes of the meta data on the shadow stack
2442675b72SSteven Rostedt (VMware) * FGRAPH_FRAME_OFFSET: Size in long words of the meta data frame
257aa1eaefSSteven Rostedt (VMware) */
267aa1eaefSSteven Rostedt (VMware) #define FGRAPH_FRAME_SIZE sizeof(struct ftrace_ret_stack)
277aa1eaefSSteven Rostedt (VMware) #define FGRAPH_FRAME_OFFSET DIV_ROUND_UP(FGRAPH_FRAME_SIZE, sizeof(long))
287aa1eaefSSteven Rostedt (VMware)
297aa1eaefSSteven Rostedt (VMware) /*
307aa1eaefSSteven Rostedt (VMware) * On entry to a function (via function_graph_enter()), a new fgraph frame
317aa1eaefSSteven Rostedt (VMware) * (ftrace_ret_stack) is pushed onto the stack as well as a word that
327aa1eaefSSteven Rostedt (VMware) * holds a bitmask and a type (called "bitmap"). The bitmap is defined as:
337aa1eaefSSteven Rostedt (VMware) *
347aa1eaefSSteven Rostedt (VMware) * bits: 0 - 9 offset in words from the previous ftrace_ret_stack
357aa1eaefSSteven Rostedt (VMware) *
367aa1eaefSSteven Rostedt (VMware) * bits: 10 - 11 Type of storage
377aa1eaefSSteven Rostedt (VMware) * 0 - reserved
387aa1eaefSSteven Rostedt (VMware) * 1 - bitmap of fgraph_array index
3991c46b0aSSteven Rostedt (VMware) * 2 - reserved data
407aa1eaefSSteven Rostedt (VMware) *
417aa1eaefSSteven Rostedt (VMware) * For type with "bitmap of fgraph_array index" (FGRAPH_TYPE_BITMAP):
427aa1eaefSSteven Rostedt (VMware) * bits: 12 - 27 The bitmap of fgraph_ops fgraph_array index
437aa1eaefSSteven Rostedt (VMware) * That is, it's a bitmask of 0-15 (16 bits)
447aa1eaefSSteven Rostedt (VMware) * where if a corresponding ops in the fgraph_array[]
457aa1eaefSSteven Rostedt (VMware) * expects a callback from the return of the function
467aa1eaefSSteven Rostedt (VMware) * it's corresponding bit will be set.
477aa1eaefSSteven Rostedt (VMware) *
487aa1eaefSSteven Rostedt (VMware) *
497aa1eaefSSteven Rostedt (VMware) * The top of the ret_stack (when not empty) will always have a reference
507aa1eaefSSteven Rostedt (VMware) * word that points to the last fgraph frame that was saved.
517aa1eaefSSteven Rostedt (VMware) *
5291c46b0aSSteven Rostedt (VMware) * For reserved data:
5391c46b0aSSteven Rostedt (VMware) * bits: 12 - 17 The size in words that is stored
5491c46b0aSSteven Rostedt (VMware) * bits: 18 - 23 The index of fgraph_array, which shows who is stored
5591c46b0aSSteven Rostedt (VMware) *
 * That is, at the end of function_graph_enter, if the first and fourth
 * fgraph_ops on the fgraph_array[] (index 0 and 3) needs their retfunc called
 * on the return of the function being traced, and the fourth fgraph_ops
5991c46b0aSSteven Rostedt (VMware) * stored two words of data, this is what will be on the task's shadow
6091c46b0aSSteven Rostedt (VMware) * ret_stack: (the stack grows upward)
617aa1eaefSSteven Rostedt (VMware) *
624497412aSSteven Rostedt (VMware) * ret_stack[SHADOW_STACK_OFFSET]
634497412aSSteven Rostedt (VMware) * | SHADOW_STACK_TASK_VARS(ret_stack)[15] |
644497412aSSteven Rostedt (VMware) * ...
654497412aSSteven Rostedt (VMware) * | SHADOW_STACK_TASK_VARS(ret_stack)[0] |
667aa1eaefSSteven Rostedt (VMware) * ret_stack[SHADOW_STACK_MAX_OFFSET]
677aa1eaefSSteven Rostedt (VMware) * ...
687aa1eaefSSteven Rostedt (VMware) * | | <- task->curr_ret_stack
697aa1eaefSSteven Rostedt (VMware) * +--------------------------------------------+
7091c46b0aSSteven Rostedt (VMware) * | (3 << 12) | (3 << 10) | FGRAPH_FRAME_OFFSET|
7191c46b0aSSteven Rostedt (VMware) * | *or put another way* |
7291c46b0aSSteven Rostedt (VMware) * | (3 << FGRAPH_DATA_INDEX_SHIFT)| \ | This is for fgraph_ops[3].
7391c46b0aSSteven Rostedt (VMware) * | ((2 - 1) << FGRAPH_DATA_SHIFT)| \ | The data size is 2 words.
7491c46b0aSSteven Rostedt (VMware) * | (FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT)| \ |
7591c46b0aSSteven Rostedt (VMware) * | (offset2:FGRAPH_FRAME_OFFSET+3) | <- the offset2 is from here
7691c46b0aSSteven Rostedt (VMware) * +--------------------------------------------+ ( It is 4 words from the ret_stack)
7791c46b0aSSteven Rostedt (VMware) * | STORED DATA WORD 2 |
7891c46b0aSSteven Rostedt (VMware) * | STORED DATA WORD 1 |
7991c46b0aSSteven Rostedt (VMware) * +--------------------------------------------+
807aa1eaefSSteven Rostedt (VMware) * | (9 << 12) | (1 << 10) | FGRAPH_FRAME_OFFSET|
817aa1eaefSSteven Rostedt (VMware) * | *or put another way* |
827aa1eaefSSteven Rostedt (VMware) * | (BIT(3)|BIT(0)) << FGRAPH_INDEX_SHIFT | \ |
837aa1eaefSSteven Rostedt (VMware) * | FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT| \ |
8491c46b0aSSteven Rostedt (VMware) * | (offset1:FGRAPH_FRAME_OFFSET) | <- the offset1 is from here
857aa1eaefSSteven Rostedt (VMware) * +--------------------------------------------+
867aa1eaefSSteven Rostedt (VMware) * | struct ftrace_ret_stack |
877aa1eaefSSteven Rostedt (VMware) * | (stores the saved ret pointer) | <- the offset points here
887aa1eaefSSteven Rostedt (VMware) * +--------------------------------------------+
897aa1eaefSSteven Rostedt (VMware) * | (X) | (N) | ( N words away from
907aa1eaefSSteven Rostedt (VMware) * | | previous ret_stack)
917aa1eaefSSteven Rostedt (VMware) * ...
927aa1eaefSSteven Rostedt (VMware) * ret_stack[0]
937aa1eaefSSteven Rostedt (VMware) *
947aa1eaefSSteven Rostedt (VMware) * If a backtrace is required, and the real return pointer needs to be
957aa1eaefSSteven Rostedt (VMware) * fetched, then it looks at the task's curr_ret_stack offset, if it
967aa1eaefSSteven Rostedt (VMware) * is greater than zero (reserved, or right before popped), it would mask
977aa1eaefSSteven Rostedt (VMware) * the value by FGRAPH_FRAME_OFFSET_MASK to get the offset of the
987aa1eaefSSteven Rostedt (VMware) * ftrace_ret_stack structure stored on the shadow stack.
997aa1eaefSSteven Rostedt (VMware) */
1007aa1eaefSSteven Rostedt (VMware)
1017aa1eaefSSteven Rostedt (VMware) /*
1027aa1eaefSSteven Rostedt (VMware) * The following is for the top word on the stack:
1037aa1eaefSSteven Rostedt (VMware) *
1047aa1eaefSSteven Rostedt (VMware) * FGRAPH_FRAME_OFFSET (0-9) holds the offset delta to the fgraph frame
1057aa1eaefSSteven Rostedt (VMware) * FGRAPH_TYPE (10-11) holds the type of word this is.
1067aa1eaefSSteven Rostedt (VMware) * (RESERVED or BITMAP)
1077aa1eaefSSteven Rostedt (VMware) */
1087aa1eaefSSteven Rostedt (VMware) #define FGRAPH_FRAME_OFFSET_BITS 10
1097aa1eaefSSteven Rostedt (VMware) #define FGRAPH_FRAME_OFFSET_MASK GENMASK(FGRAPH_FRAME_OFFSET_BITS - 1, 0)
1107aa1eaefSSteven Rostedt (VMware)
1117aa1eaefSSteven Rostedt (VMware) #define FGRAPH_TYPE_BITS 2
1127aa1eaefSSteven Rostedt (VMware) #define FGRAPH_TYPE_MASK GENMASK(FGRAPH_TYPE_BITS - 1, 0)
1137aa1eaefSSteven Rostedt (VMware) #define FGRAPH_TYPE_SHIFT FGRAPH_FRAME_OFFSET_BITS
1147aa1eaefSSteven Rostedt (VMware)
1157aa1eaefSSteven Rostedt (VMware) enum {
1167aa1eaefSSteven Rostedt (VMware) FGRAPH_TYPE_RESERVED = 0,
1177aa1eaefSSteven Rostedt (VMware) FGRAPH_TYPE_BITMAP = 1,
11891c46b0aSSteven Rostedt (VMware) FGRAPH_TYPE_DATA = 2,
1197aa1eaefSSteven Rostedt (VMware) };
1207aa1eaefSSteven Rostedt (VMware)
1217aa1eaefSSteven Rostedt (VMware) /*
1227aa1eaefSSteven Rostedt (VMware) * For BITMAP type:
1237aa1eaefSSteven Rostedt (VMware) * FGRAPH_INDEX (12-27) bits holding the gops index wanting return callback called
1247aa1eaefSSteven Rostedt (VMware) */
1257aa1eaefSSteven Rostedt (VMware) #define FGRAPH_INDEX_BITS 16
1267aa1eaefSSteven Rostedt (VMware) #define FGRAPH_INDEX_MASK GENMASK(FGRAPH_INDEX_BITS - 1, 0)
1277aa1eaefSSteven Rostedt (VMware) #define FGRAPH_INDEX_SHIFT (FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS)
1287aa1eaefSSteven Rostedt (VMware)
12991c46b0aSSteven Rostedt (VMware) /*
13091c46b0aSSteven Rostedt (VMware) * For DATA type:
13191c46b0aSSteven Rostedt (VMware) * FGRAPH_DATA (12-17) bits hold the size of data (in words)
13291c46b0aSSteven Rostedt (VMware) * FGRAPH_INDEX (18-23) bits hold the index for which gops->idx the data is for
13391c46b0aSSteven Rostedt (VMware) *
13491c46b0aSSteven Rostedt (VMware) * Note:
13591c46b0aSSteven Rostedt (VMware) * data_size == 0 means 1 word, and 31 (=2^5 - 1) means 32 words.
13691c46b0aSSteven Rostedt (VMware) */
13791c46b0aSSteven Rostedt (VMware) #define FGRAPH_DATA_BITS 5
13891c46b0aSSteven Rostedt (VMware) #define FGRAPH_DATA_MASK GENMASK(FGRAPH_DATA_BITS - 1, 0)
13991c46b0aSSteven Rostedt (VMware) #define FGRAPH_DATA_SHIFT (FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS)
14091c46b0aSSteven Rostedt (VMware) #define FGRAPH_MAX_DATA_SIZE (sizeof(long) * (1 << FGRAPH_DATA_BITS))
14191c46b0aSSteven Rostedt (VMware)
14291c46b0aSSteven Rostedt (VMware) #define FGRAPH_DATA_INDEX_BITS 4
14391c46b0aSSteven Rostedt (VMware) #define FGRAPH_DATA_INDEX_MASK GENMASK(FGRAPH_DATA_INDEX_BITS - 1, 0)
14491c46b0aSSteven Rostedt (VMware) #define FGRAPH_DATA_INDEX_SHIFT (FGRAPH_DATA_SHIFT + FGRAPH_DATA_BITS)
14591c46b0aSSteven Rostedt (VMware)
14691c46b0aSSteven Rostedt (VMware) #define FGRAPH_MAX_INDEX \
14791c46b0aSSteven Rostedt (VMware) ((FGRAPH_INDEX_SIZE << FGRAPH_DATA_BITS) + FGRAPH_RET_INDEX)
14891c46b0aSSteven Rostedt (VMware)
1497aa1eaefSSteven Rostedt (VMware) #define FGRAPH_ARRAY_SIZE FGRAPH_INDEX_BITS
1507aa1eaefSSteven Rostedt (VMware)
1517aa1eaefSSteven Rostedt (VMware) /*
15242675b72SSteven Rostedt (VMware) * SHADOW_STACK_SIZE: The size in bytes of the entire shadow stack
15342675b72SSteven Rostedt (VMware) * SHADOW_STACK_OFFSET: The size in long words of the shadow stack
15442675b72SSteven Rostedt (VMware) * SHADOW_STACK_MAX_OFFSET: The max offset of the stack for a new frame to be added
15542675b72SSteven Rostedt (VMware) */
1566ea8b69dSSteven Rostedt #define SHADOW_STACK_SIZE (4096)
1577aa1eaefSSteven Rostedt (VMware) #define SHADOW_STACK_OFFSET (SHADOW_STACK_SIZE / sizeof(long))
15842675b72SSteven Rostedt (VMware) /* Leave on a buffer at the end */
1594497412aSSteven Rostedt (VMware) #define SHADOW_STACK_MAX_OFFSET \
1604497412aSSteven Rostedt (VMware) (SHADOW_STACK_OFFSET - (FGRAPH_FRAME_OFFSET + 1 + FGRAPH_ARRAY_SIZE))
16142675b72SSteven Rostedt (VMware)
1627aa1eaefSSteven Rostedt (VMware) /* RET_STACK(): Return the frame from a given @offset from task @t */
1637aa1eaefSSteven Rostedt (VMware) #define RET_STACK(t, offset) ((struct ftrace_ret_stack *)(&(t)->ret_stack[offset]))
16442675b72SSteven Rostedt (VMware)
1654497412aSSteven Rostedt (VMware) /*
 * Each fgraph_ops has a reserved unsigned long at the end (top) of the
1674497412aSSteven Rostedt (VMware) * ret_stack to store task specific state.
1684497412aSSteven Rostedt (VMware) */
1694497412aSSteven Rostedt (VMware) #define SHADOW_STACK_TASK_VARS(ret_stack) \
1704497412aSSteven Rostedt (VMware) ((unsigned long *)(&(ret_stack)[SHADOW_STACK_OFFSET - FGRAPH_ARRAY_SIZE]))
1714497412aSSteven Rostedt (VMware)
17218bfee32SChristophe Leroy DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
173e73e679fSSteven Rostedt (VMware) int ftrace_graph_active;
174e73e679fSSteven Rostedt (VMware)
17543409848SSteven Rostedt static struct kmem_cache *fgraph_stack_cachep;
17643409848SSteven Rostedt
177518d6804SSteven Rostedt (VMware) static struct fgraph_ops *fgraph_array[FGRAPH_ARRAY_SIZE];
178a5b6d4daSSteven Rostedt (Google) static unsigned long fgraph_array_bitmask;
179518d6804SSteven Rostedt (VMware)
1806d478659SMasami Hiramatsu (Google) /* LRU index table for fgraph_array */
1816d478659SMasami Hiramatsu (Google) static int fgraph_lru_table[FGRAPH_ARRAY_SIZE];
1826d478659SMasami Hiramatsu (Google) static int fgraph_lru_next;
1836d478659SMasami Hiramatsu (Google) static int fgraph_lru_last;
1846d478659SMasami Hiramatsu (Google)
1856d478659SMasami Hiramatsu (Google) /* Initialize fgraph_lru_table with unused index */
fgraph_lru_init(void)1866d478659SMasami Hiramatsu (Google) static void fgraph_lru_init(void)
1876d478659SMasami Hiramatsu (Google) {
1886d478659SMasami Hiramatsu (Google) int i;
1896d478659SMasami Hiramatsu (Google)
1906d478659SMasami Hiramatsu (Google) for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
1916d478659SMasami Hiramatsu (Google) fgraph_lru_table[i] = i;
1926d478659SMasami Hiramatsu (Google) }
1936d478659SMasami Hiramatsu (Google)
1946d478659SMasami Hiramatsu (Google) /* Release the used index to the LRU table */
fgraph_lru_release_index(int idx)1956d478659SMasami Hiramatsu (Google) static int fgraph_lru_release_index(int idx)
1966d478659SMasami Hiramatsu (Google) {
1976d478659SMasami Hiramatsu (Google) if (idx < 0 || idx >= FGRAPH_ARRAY_SIZE ||
1986d478659SMasami Hiramatsu (Google) WARN_ON_ONCE(fgraph_lru_table[fgraph_lru_last] != -1))
1996d478659SMasami Hiramatsu (Google) return -1;
2006d478659SMasami Hiramatsu (Google)
2016d478659SMasami Hiramatsu (Google) fgraph_lru_table[fgraph_lru_last] = idx;
2026d478659SMasami Hiramatsu (Google) fgraph_lru_last = (fgraph_lru_last + 1) % FGRAPH_ARRAY_SIZE;
203a5b6d4daSSteven Rostedt (Google)
204a5b6d4daSSteven Rostedt (Google) clear_bit(idx, &fgraph_array_bitmask);
2056d478659SMasami Hiramatsu (Google) return 0;
2066d478659SMasami Hiramatsu (Google) }
2076d478659SMasami Hiramatsu (Google)
2086d478659SMasami Hiramatsu (Google) /* Allocate a new index from LRU table */
fgraph_lru_alloc_index(void)2096d478659SMasami Hiramatsu (Google) static int fgraph_lru_alloc_index(void)
2106d478659SMasami Hiramatsu (Google) {
2116d478659SMasami Hiramatsu (Google) int idx = fgraph_lru_table[fgraph_lru_next];
2126d478659SMasami Hiramatsu (Google)
2136d478659SMasami Hiramatsu (Google) /* No id is available */
2146d478659SMasami Hiramatsu (Google) if (idx == -1)
2156d478659SMasami Hiramatsu (Google) return -1;
2166d478659SMasami Hiramatsu (Google)
2176d478659SMasami Hiramatsu (Google) fgraph_lru_table[fgraph_lru_next] = -1;
2186d478659SMasami Hiramatsu (Google) fgraph_lru_next = (fgraph_lru_next + 1) % FGRAPH_ARRAY_SIZE;
219a5b6d4daSSteven Rostedt (Google)
220a5b6d4daSSteven Rostedt (Google) set_bit(idx, &fgraph_array_bitmask);
2216d478659SMasami Hiramatsu (Google) return idx;
2226d478659SMasami Hiramatsu (Google) }
2236d478659SMasami Hiramatsu (Google)
22491c46b0aSSteven Rostedt (VMware) /* Get the offset to the fgraph frame from a ret_stack value */
__get_offset(unsigned long val)22591c46b0aSSteven Rostedt (VMware) static inline int __get_offset(unsigned long val)
22691c46b0aSSteven Rostedt (VMware) {
22791c46b0aSSteven Rostedt (VMware) return val & FGRAPH_FRAME_OFFSET_MASK;
22891c46b0aSSteven Rostedt (VMware) }
22991c46b0aSSteven Rostedt (VMware)
23091c46b0aSSteven Rostedt (VMware) /* Get the type of word from a ret_stack value */
__get_type(unsigned long val)23191c46b0aSSteven Rostedt (VMware) static inline int __get_type(unsigned long val)
23291c46b0aSSteven Rostedt (VMware) {
23391c46b0aSSteven Rostedt (VMware) return (val >> FGRAPH_TYPE_SHIFT) & FGRAPH_TYPE_MASK;
23491c46b0aSSteven Rostedt (VMware) }
23591c46b0aSSteven Rostedt (VMware)
23691c46b0aSSteven Rostedt (VMware) /* Get the data_index for a DATA type ret_stack word */
__get_data_index(unsigned long val)23791c46b0aSSteven Rostedt (VMware) static inline int __get_data_index(unsigned long val)
23891c46b0aSSteven Rostedt (VMware) {
23991c46b0aSSteven Rostedt (VMware) return (val >> FGRAPH_DATA_INDEX_SHIFT) & FGRAPH_DATA_INDEX_MASK;
24091c46b0aSSteven Rostedt (VMware) }
24191c46b0aSSteven Rostedt (VMware)
24291c46b0aSSteven Rostedt (VMware) /* Get the data_size for a DATA type ret_stack word */
__get_data_size(unsigned long val)24391c46b0aSSteven Rostedt (VMware) static inline int __get_data_size(unsigned long val)
24491c46b0aSSteven Rostedt (VMware) {
24591c46b0aSSteven Rostedt (VMware) return ((val >> FGRAPH_DATA_SHIFT) & FGRAPH_DATA_MASK) + 1;
24691c46b0aSSteven Rostedt (VMware) }
24791c46b0aSSteven Rostedt (VMware)
24891c46b0aSSteven Rostedt (VMware) /* Get the word from the ret_stack at @offset */
get_fgraph_entry(struct task_struct * t,int offset)24991c46b0aSSteven Rostedt (VMware) static inline unsigned long get_fgraph_entry(struct task_struct *t, int offset)
25091c46b0aSSteven Rostedt (VMware) {
25191c46b0aSSteven Rostedt (VMware) return t->ret_stack[offset];
25291c46b0aSSteven Rostedt (VMware) }
25391c46b0aSSteven Rostedt (VMware)
2547aa1eaefSSteven Rostedt (VMware) /* Get the FRAME_OFFSET from the word from the @offset on ret_stack */
/* Get the FRAME_OFFSET from the word from the @offset on ret_stack */
static inline int get_frame_offset(struct task_struct *t, int offset)
{
	return __get_offset(get_fgraph_entry(t, offset));
}
2597aa1eaefSSteven Rostedt (VMware)
2607aa1eaefSSteven Rostedt (VMware) /* For BITMAP type: get the bitmask from the @offset at ret_stack */
2617aa1eaefSSteven Rostedt (VMware) static inline unsigned long
get_bitmap_bits(struct task_struct * t,int offset)2627aa1eaefSSteven Rostedt (VMware) get_bitmap_bits(struct task_struct *t, int offset)
2637aa1eaefSSteven Rostedt (VMware) {
2647aa1eaefSSteven Rostedt (VMware) return (t->ret_stack[offset] >> FGRAPH_INDEX_SHIFT) & FGRAPH_INDEX_MASK;
2657aa1eaefSSteven Rostedt (VMware) }
2667aa1eaefSSteven Rostedt (VMware)
2677aa1eaefSSteven Rostedt (VMware) /* Write the bitmap to the ret_stack at @offset (does index, offset and bitmask) */
2687aa1eaefSSteven Rostedt (VMware) static inline void
set_bitmap(struct task_struct * t,int offset,unsigned long bitmap)2697aa1eaefSSteven Rostedt (VMware) set_bitmap(struct task_struct *t, int offset, unsigned long bitmap)
2707aa1eaefSSteven Rostedt (VMware) {
2717aa1eaefSSteven Rostedt (VMware) t->ret_stack[offset] = (bitmap << FGRAPH_INDEX_SHIFT) |
2727aa1eaefSSteven Rostedt (VMware) (FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET;
2737aa1eaefSSteven Rostedt (VMware) }
2747aa1eaefSSteven Rostedt (VMware)
27591c46b0aSSteven Rostedt (VMware) /* For DATA type: get the data saved under the ret_stack word at @offset */
get_data_type_data(struct task_struct * t,int offset)27691c46b0aSSteven Rostedt (VMware) static inline void *get_data_type_data(struct task_struct *t, int offset)
27791c46b0aSSteven Rostedt (VMware) {
27891c46b0aSSteven Rostedt (VMware) unsigned long val = t->ret_stack[offset];
27991c46b0aSSteven Rostedt (VMware)
28091c46b0aSSteven Rostedt (VMware) if (__get_type(val) != FGRAPH_TYPE_DATA)
28191c46b0aSSteven Rostedt (VMware) return NULL;
28291c46b0aSSteven Rostedt (VMware) offset -= __get_data_size(val);
28391c46b0aSSteven Rostedt (VMware) return (void *)&t->ret_stack[offset];
28491c46b0aSSteven Rostedt (VMware) }
28591c46b0aSSteven Rostedt (VMware)
28691c46b0aSSteven Rostedt (VMware) /* Create the ret_stack word for a DATA type */
make_data_type_val(int idx,int size,int offset)28791c46b0aSSteven Rostedt (VMware) static inline unsigned long make_data_type_val(int idx, int size, int offset)
28891c46b0aSSteven Rostedt (VMware) {
28991c46b0aSSteven Rostedt (VMware) return (idx << FGRAPH_DATA_INDEX_SHIFT) |
29091c46b0aSSteven Rostedt (VMware) ((size - 1) << FGRAPH_DATA_SHIFT) |
29191c46b0aSSteven Rostedt (VMware) (FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT) | offset;
29291c46b0aSSteven Rostedt (VMware) }
29391c46b0aSSteven Rostedt (VMware)
2942fbb5499SSteven Rostedt (VMware) /* ftrace_graph_entry set to this to tell some archs to run function graph */
static int entry_run(struct ftrace_graph_ent *trace, struct fgraph_ops *ops,
		     struct ftrace_regs *fregs)
{
	/* Stub entry handler: ignores all arguments and always returns 0 */
	return 0;
}
3002fbb5499SSteven Rostedt (VMware)
3012fbb5499SSteven Rostedt (VMware) /* ftrace_graph_return set to this to tell some archs to run function graph */
static void return_run(struct ftrace_graph_ret *trace, struct fgraph_ops *ops,
		       struct ftrace_regs *fregs)
{
	/* Stub return handler: intentionally does nothing */
}
3062fbb5499SSteven Rostedt (VMware)
ret_stack_set_task_var(struct task_struct * t,int idx,long val)3074497412aSSteven Rostedt (VMware) static void ret_stack_set_task_var(struct task_struct *t, int idx, long val)
3084497412aSSteven Rostedt (VMware) {
3094497412aSSteven Rostedt (VMware) unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);
3104497412aSSteven Rostedt (VMware)
3114497412aSSteven Rostedt (VMware) gvals[idx] = val;
3124497412aSSteven Rostedt (VMware) }
3134497412aSSteven Rostedt (VMware)
3144497412aSSteven Rostedt (VMware) static unsigned long *
ret_stack_get_task_var(struct task_struct * t,int idx)3154497412aSSteven Rostedt (VMware) ret_stack_get_task_var(struct task_struct *t, int idx)
3164497412aSSteven Rostedt (VMware) {
3174497412aSSteven Rostedt (VMware) unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);
3184497412aSSteven Rostedt (VMware)
3194497412aSSteven Rostedt (VMware) return &gvals[idx];
3204497412aSSteven Rostedt (VMware) }
3214497412aSSteven Rostedt (VMware)
ret_stack_init_task_vars(unsigned long * ret_stack)3224497412aSSteven Rostedt (VMware) static void ret_stack_init_task_vars(unsigned long *ret_stack)
3234497412aSSteven Rostedt (VMware) {
3244497412aSSteven Rostedt (VMware) unsigned long *gvals = SHADOW_STACK_TASK_VARS(ret_stack);
3254497412aSSteven Rostedt (VMware)
3264497412aSSteven Rostedt (VMware) memset(gvals, 0, sizeof(*gvals) * FGRAPH_ARRAY_SIZE);
3274497412aSSteven Rostedt (VMware) }
3284497412aSSteven Rostedt (VMware)
3294497412aSSteven Rostedt (VMware) /**
33091c46b0aSSteven Rostedt (VMware) * fgraph_reserve_data - Reserve storage on the task's ret_stack
33191c46b0aSSteven Rostedt (VMware) * @idx: The index of fgraph_array
33291c46b0aSSteven Rostedt (VMware) * @size_bytes: The size in bytes to reserve
33391c46b0aSSteven Rostedt (VMware) *
33491c46b0aSSteven Rostedt (VMware) * Reserves space of up to FGRAPH_MAX_DATA_SIZE bytes on the
33591c46b0aSSteven Rostedt (VMware) * task's ret_stack shadow stack, for a given fgraph_ops during
33691c46b0aSSteven Rostedt (VMware) * the entryfunc() call. If entryfunc() returns zero, the storage
33791c46b0aSSteven Rostedt (VMware) * is discarded. An entryfunc() can only call this once per iteration.
33891c46b0aSSteven Rostedt (VMware) * The fgraph_ops retfunc() can retrieve this stored data with
33991c46b0aSSteven Rostedt (VMware) * fgraph_retrieve_data().
34091c46b0aSSteven Rostedt (VMware) *
34191c46b0aSSteven Rostedt (VMware) * Returns: On success, a pointer to the data on the stack.
34291c46b0aSSteven Rostedt (VMware) * Otherwise, NULL if there's not enough space left on the
34391c46b0aSSteven Rostedt (VMware) * ret_stack for the data, or if fgraph_reserve_data() was called
34491c46b0aSSteven Rostedt (VMware) * more than once for a single entryfunc() call.
34591c46b0aSSteven Rostedt (VMware) */
void *fgraph_reserve_data(int idx, int size_bytes)
{
	unsigned long val;
	void *data;
	int curr_ret_stack = current->curr_ret_stack;
	int data_size;

	if (size_bytes > FGRAPH_MAX_DATA_SIZE)
		return NULL;

	/* Convert the data size to number of longs. */
	data_size = (size_bytes + sizeof(long) - 1) >> (sizeof(long) == 4 ? 2 : 3);

	/* Read the current top word; the data gets pushed on top of it */
	val = get_fgraph_entry(current, curr_ret_stack - 1);
	data = &current->ret_stack[curr_ret_stack];

	/* Account for the data words plus one trailing DATA marker word */
	curr_ret_stack += data_size + 1;
	if (unlikely(curr_ret_stack >= SHADOW_STACK_MAX_OFFSET))
		return NULL;

	val = make_data_type_val(idx, data_size, __get_offset(val) + data_size + 1);

	/* Set the last word to be reserved */
	current->ret_stack[curr_ret_stack - 1] = val;

	/* Make sure interrupts see this */
	barrier();
	current->curr_ret_stack = curr_ret_stack;
	/* Again sync with interrupts, and reset reserve */
	current->ret_stack[curr_ret_stack - 1] = val;

	return data;
}
37991c46b0aSSteven Rostedt (VMware)
38091c46b0aSSteven Rostedt (VMware) /**
38191c46b0aSSteven Rostedt (VMware) * fgraph_retrieve_data - Retrieve stored data from fgraph_reserve_data()
38291c46b0aSSteven Rostedt (VMware) * @idx: the index of fgraph_array (fgraph_ops::idx)
38391c46b0aSSteven Rostedt (VMware) * @size_bytes: pointer to retrieved data size.
38491c46b0aSSteven Rostedt (VMware) *
38591c46b0aSSteven Rostedt (VMware) * This is to be called by a fgraph_ops retfunc(), to retrieve data that
38691c46b0aSSteven Rostedt (VMware) * was stored by the fgraph_ops entryfunc() on the function entry.
38791c46b0aSSteven Rostedt (VMware) * That is, this will retrieve the data that was reserved on the
38891c46b0aSSteven Rostedt (VMware) * entry of the function that corresponds to the exit of the function
38991c46b0aSSteven Rostedt (VMware) * that the fgraph_ops retfunc() is called on.
39091c46b0aSSteven Rostedt (VMware) *
39191c46b0aSSteven Rostedt (VMware) * Returns: The stored data from fgraph_reserve_data() called by the
39291c46b0aSSteven Rostedt (VMware) * matching entryfunc() for the retfunc() this is called from.
39391c46b0aSSteven Rostedt (VMware) * Or NULL if there was nothing stored.
39491c46b0aSSteven Rostedt (VMware) */
void *fgraph_retrieve_data(int idx, int *size_bytes)
{
	/* Depth 0 means the function the current retfunc is running for */
	return fgraph_retrieve_parent_data(idx, size_bytes, 0);
}
39991c46b0aSSteven Rostedt (VMware)
40091c46b0aSSteven Rostedt (VMware) /**
4014497412aSSteven Rostedt (VMware) * fgraph_get_task_var - retrieve a task specific state variable
4024497412aSSteven Rostedt (VMware) * @gops: The ftrace_ops that owns the task specific variable
4034497412aSSteven Rostedt (VMware) *
4044497412aSSteven Rostedt (VMware) * Every registered fgraph_ops has a task state variable
4054497412aSSteven Rostedt (VMware) * reserved on the task's ret_stack. This function returns the
4064497412aSSteven Rostedt (VMware) * address to that variable.
4074497412aSSteven Rostedt (VMware) *
4084497412aSSteven Rostedt (VMware) * Returns the address to the fgraph_ops @gops tasks specific
4094497412aSSteven Rostedt (VMware) * unsigned long variable.
4104497412aSSteven Rostedt (VMware) */
fgraph_get_task_var(struct fgraph_ops * gops)4114497412aSSteven Rostedt (VMware) unsigned long *fgraph_get_task_var(struct fgraph_ops *gops)
4124497412aSSteven Rostedt (VMware) {
4134497412aSSteven Rostedt (VMware) return ret_stack_get_task_var(current, gops->idx);
4144497412aSSteven Rostedt (VMware) }
4154497412aSSteven Rostedt (VMware)
4167aa1eaefSSteven Rostedt (VMware) /*
4177aa1eaefSSteven Rostedt (VMware) * @offset: The offset into @t->ret_stack to find the ret_stack entry
4187aa1eaefSSteven Rostedt (VMware) * @frame_offset: Where to place the offset into @t->ret_stack of that entry
4197aa1eaefSSteven Rostedt (VMware) *
4207aa1eaefSSteven Rostedt (VMware) * Returns a pointer to the previous ret_stack below @offset or NULL
4217aa1eaefSSteven Rostedt (VMware) * when it reaches the bottom of the stack.
4227aa1eaefSSteven Rostedt (VMware) *
4237aa1eaefSSteven Rostedt (VMware) * Calling this with:
4247aa1eaefSSteven Rostedt (VMware) *
4257aa1eaefSSteven Rostedt (VMware) * offset = task->curr_ret_stack;
4267aa1eaefSSteven Rostedt (VMware) * do {
4277aa1eaefSSteven Rostedt (VMware) * ret_stack = get_ret_stack(task, offset, &offset);
4287aa1eaefSSteven Rostedt (VMware) * } while (ret_stack);
4297aa1eaefSSteven Rostedt (VMware) *
4307aa1eaefSSteven Rostedt (VMware) * Will iterate through all the ret_stack entries from curr_ret_stack
4317aa1eaefSSteven Rostedt (VMware) * down to the first one.
4327aa1eaefSSteven Rostedt (VMware) */
static inline struct ftrace_ret_stack *
get_ret_stack(struct task_struct *t, int offset, int *frame_offset)
{
	int offs;

	/* The frame meta data must pack evenly into long words */
	BUILD_BUG_ON(FGRAPH_FRAME_SIZE % sizeof(long));

	/* offset <= 0 means the shadow stack is empty (or corrupt) */
	if (unlikely(offset <= 0))
		return NULL;

	/* The word just below @offset holds the delta down to its frame */
	offs = get_frame_offset(t, --offset);
	if (WARN_ON_ONCE(offs <= 0 || offs > offset))
		return NULL;

	offset -= offs;

	*frame_offset = offset;
	return RET_STACK(t, offset);
}
4527aa1eaefSSteven Rostedt (VMware)
/**
 * fgraph_retrieve_parent_data - get data from a parent function
 * @idx: The index into the fgraph_array (fgraph_ops::idx)
 * @size_bytes: A pointer to retrieved data size
 * @depth: The depth to find the parent (0 is the current function)
 *
 * This is similar to fgraph_retrieve_data() but can be used to retrieve
 * data from a parent caller function.
 *
 * Return: a pointer to the specified parent data or NULL if not found
 */
void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth)
{
	struct ftrace_ret_stack *ret_stack = NULL;
	int offset = current->curr_ret_stack;
	unsigned long val;

	if (offset <= 0)
		return NULL;

	/* Walk down @depth + 1 fgraph frames to reach the requested caller. */
	for (;;) {
		int next_offset;

		ret_stack = get_ret_stack(current, offset, &next_offset);
		if (!ret_stack || --depth < 0)
			break;
		offset = next_offset;
	}

	if (!ret_stack)
		return NULL;

	/* Step down to the storage words recorded for this frame. */
	offset--;

	/*
	 * Scan the stacked data entries until one tagged with the requested
	 * fgraph_ops index (@idx) is found; each entry records its own size.
	 */
	val = get_fgraph_entry(current, offset);
	while (__get_type(val) == FGRAPH_TYPE_DATA) {
		if (__get_data_index(val) == idx)
			goto found;
		offset -= __get_data_size(val) + 1;
		val = get_fgraph_entry(current, offset);
	}
	return NULL;
found:
	/* The stored size is in longs; report bytes to the caller. */
	if (size_bytes)
		*size_bytes = __get_data_size(val) * sizeof(long);
	return get_data_type_data(current, offset);
}
500a312a0f7SSteven Rostedt
/* Enabled by default (can be cleared by function_graph tracer flags) */
bool fgraph_sleep_time = true;
503d864a3caSSteven Rostedt (VMware)
504ff979b2aSChengming Zhou #ifdef CONFIG_DYNAMIC_FTRACE
/*
 * archs can override this function if they must do something
 * to enable hook for graph tracer.
 *
 * Return: 0 (this weak default is a no-op).
 */
int __weak ftrace_enable_ftrace_graph_caller(void)
{
	return 0;
}
513e999995cSChengming Zhou
/*
 * archs can override this function if they must do something
 * to disable hook for graph tracer.
 *
 * Return: 0 (this weak default is a no-op).
 */
int __weak ftrace_disable_ftrace_graph_caller(void)
{
	return 0;
}
522ff979b2aSChengming Zhou #endif
523e999995cSChengming Zhou
/* Stub entry handler for unused fgraph slots; returns 0 (do not trace). */
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
			    struct fgraph_ops *gops,
			    struct ftrace_regs *fregs)
{
	return 0;
}
530518d6804SSteven Rostedt (VMware)
/* Stub return handler for unused fgraph slots; does nothing. */
static void ftrace_graph_ret_stub(struct ftrace_graph_ret *trace,
				  struct fgraph_ops *gops,
				  struct ftrace_regs *fregs)
{
}
536518d6804SSteven Rostedt (VMware)
/* Placeholder ops installed in empty fgraph_array slots. */
static struct fgraph_ops fgraph_stub = {
	.entryfunc = ftrace_graph_entry_stub,
	.retfunc = ftrace_graph_ret_stub,
};

/*
 * When the fgraph_do_direct static key is enabled, entry/return events
 * are dispatched straight to this single fgraph_ops through the static
 * calls below, bypassing the fgraph_array loop.
 */
static struct fgraph_ops *fgraph_direct_gops = &fgraph_stub;
DEFINE_STATIC_CALL(fgraph_func, ftrace_graph_entry_stub);
DEFINE_STATIC_CALL(fgraph_retfunc, ftrace_graph_ret_stub);
static DEFINE_STATIC_KEY_TRUE(fgraph_do_direct);
546cc60ee81SSteven Rostedt (Google)
/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	static_branch_enable(&kill_ftrace_graph);
}
559d864a3caSSteven Rostedt (VMware)
/*
 * Add a function return address to the trace stack on thread info.
 *
 * Returns the shadow stack offset of the reserved word on success, or
 * -EBUSY if tracing is dead, the stack is unallocated, or it is full.
 */
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp,
			 int fgraph_idx)
{
	struct ftrace_ret_stack *ret_stack;
	unsigned long val;
	int offset;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	BUILD_BUG_ON(SHADOW_STACK_SIZE % sizeof(long));

	/* Set val to "reserved" with the delta to the new fgraph frame */
	val = (FGRAPH_TYPE_RESERVED << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/*
	 * Check if there's room on the shadow stack to fit a fgraph frame
	 * and a bitmap word.
	 */
	if (current->curr_ret_stack + FGRAPH_FRAME_OFFSET + 1 >= SHADOW_STACK_MAX_OFFSET) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	offset = READ_ONCE(current->curr_ret_stack);
	ret_stack = RET_STACK(current, offset);
	offset += FGRAPH_FRAME_OFFSET;

	/* ret offset = FGRAPH_FRAME_OFFSET ; type = reserved */
	current->ret_stack[offset] = val;
	ret_stack->ret = ret;
	/*
	 * The unwinders expect curr_ret_stack to point to either zero
	 * or an offset where to find the next ret_stack. Even though the
	 * ret stack might be bogus, we want to write the ret and the
	 * offset to find the ret_stack before we increment the stack point.
	 * If an interrupt comes in now before we increment the curr_ret_stack
	 * it may blow away what we wrote. But that's fine, because the
	 * offset will still be correct (even though the 'ret' won't be).
	 * What we worry about is the offset being correct after we increment
	 * the curr_ret_stack and before we update that offset, as if an
	 * interrupt comes in and does an unwind stack dump, it will need
	 * at least a correct offset!
	 */
	barrier();
	WRITE_ONCE(current->curr_ret_stack, offset + 1);
	/*
	 * This next barrier is to ensure that an interrupt coming in
	 * will not corrupt what we are about to write.
	 */
	barrier();

	/* Still keep it reserved even if an interrupt came in */
	current->ret_stack[offset] = val;

	/* Now that the frame is claimed, fill in the rest of its fields. */
	ret_stack->ret = ret;
	ret_stack->func = func;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	ret_stack->fp = frame_pointer;
#endif
	ret_stack->retp = retp;
	return offset;
}
635d864a3caSSteven Rostedt (VMware)
636d2ccbccbSSteven Rostedt (VMware) /*
637d2ccbccbSSteven Rostedt (VMware) * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
638d2ccbccbSSteven Rostedt (VMware) * functions. But those archs currently don't support direct functions
639d2ccbccbSSteven Rostedt (VMware) * anyway, and ftrace_find_rec_direct() is just a stub for them.
640d2ccbccbSSteven Rostedt (VMware) * Define MCOUNT_INSN_SIZE to keep those archs compiling.
641d2ccbccbSSteven Rostedt (VMware) */
642d2ccbccbSSteven Rostedt (VMware) #ifndef MCOUNT_INSN_SIZE
643d2ccbccbSSteven Rostedt (VMware) /* Make sure this only works without direct calls */
644d2ccbccbSSteven Rostedt (VMware) # ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
645d2ccbccbSSteven Rostedt (VMware) # error MCOUNT_INSN_SIZE not defined with direct calls enabled
646d2ccbccbSSteven Rostedt (VMware) # endif
647d2ccbccbSSteven Rostedt (VMware) # define MCOUNT_INSN_SIZE 0
648d2ccbccbSSteven Rostedt (VMware) #endif
649d2ccbccbSSteven Rostedt (VMware)
/*
 * If the caller does not use ftrace, call this function.
 *
 * Called on function entry: pushes a new fgraph frame and invokes the
 * entry handler of every registered fgraph_ops that traces @func.
 * Returns 0 on success, or -EBUSY if recursion was detected or the
 * frame could not be pushed.
 */
int function_graph_enter_regs(unsigned long ret, unsigned long func,
			      unsigned long frame_pointer, unsigned long *retp,
			      struct ftrace_regs *fregs)
{
	struct ftrace_graph_ent trace;
	unsigned long bitmap = 0;
	int offset;
	int bit;
	int i;

	/* Guard against recursing into this code from the handlers. */
	bit = ftrace_test_recursion_trylock(func, ret);
	if (bit < 0)
		return -EBUSY;

	trace.func = func;
	trace.depth = ++current->curr_ret_depth;

	offset = ftrace_push_return_trace(ret, func, frame_pointer, retp, 0);
	if (offset < 0)
		goto out;

#ifdef CONFIG_HAVE_STATIC_CALL
	/* Fast path: dispatch directly to the single fgraph_ops via static call. */
	if (static_branch_likely(&fgraph_do_direct)) {
		int save_curr_ret_stack = current->curr_ret_stack;

		if (static_call(fgraph_func)(&trace, fgraph_direct_gops, fregs))
			bitmap |= BIT(fgraph_direct_gops->idx);
		else
			/* Clear out any saved storage */
			current->curr_ret_stack = save_curr_ret_stack;
	} else
#endif
	{
		for_each_set_bit(i, &fgraph_array_bitmask,
				 sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) {
			struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]);
			int save_curr_ret_stack;

			if (gops == &fgraph_stub)
				continue;

			save_curr_ret_stack = current->curr_ret_stack;
			if (ftrace_ops_test(&gops->ops, func, NULL) &&
			    gops->entryfunc(&trace, gops, fregs))
				bitmap |= BIT(i);
			else
				/* Clear out any saved storage */
				current->curr_ret_stack = save_curr_ret_stack;
		}
	}

	/* No handler accepted this function: unwind the frame we pushed. */
	if (!bitmap)
		goto out_ret;

	/*
	 * Since this function uses fgraph_idx = 0 as a tail-call checking
	 * flag, set that bit always.
	 */
	set_bitmap(current, offset, bitmap | BIT(0));
	ftrace_test_recursion_unlock(bit);
	return 0;
out_ret:
	current->curr_ret_stack -= FGRAPH_FRAME_OFFSET + 1;
out:
	current->curr_ret_depth--;
	ftrace_test_recursion_unlock(bit);
	return -EBUSY;
}
719d864a3caSSteven Rostedt (VMware)
/*
 * Retrieve a function return address to the trace stack on thread info.
 *
 * Fills in @trace, stores the original return address in *@ret and the
 * popped frame's offset in *@offset.  Returns the popped frame, or NULL
 * (with *@ret pointed at panic) if the shadow stack is corrupted.
 */
static struct ftrace_ret_stack *
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer, int *offset)
{
	struct ftrace_ret_stack *ret_stack;

	ret_stack = get_ret_stack(current, current->curr_ret_stack, offset);

	if (unlikely(!ret_stack)) {
		ftrace_graph_stop();
		WARN(1, "Bad function graph ret_stack pointer: %d",
		     current->curr_ret_stack);
		/* Might as well panic, otherwise we have no where to go */
		*ret = (unsigned long)panic;
		return NULL;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(ret_stack->fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     ret_stack->fp,
		     frame_pointer,
		     (void *)ret_stack->func,
		     ret_stack->ret);
		*ret = (unsigned long)panic;
		return NULL;
	}
#endif

	*offset += FGRAPH_FRAME_OFFSET;
	*ret = ret_stack->ret;
	trace->func = ret_stack->func;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = current->curr_ret_depth;
	/*
	 * We still want to trace interrupts coming in if
	 * max_depth is set to 1. Make sure the decrement is
	 * seen before ftrace_graph_return.
	 */
	barrier();

	return ret_stack;
}
780d864a3caSSteven Rostedt (VMware)
781d864a3caSSteven Rostedt (VMware) /*
782e73e679fSSteven Rostedt (VMware) * Hibernation protection.
783e73e679fSSteven Rostedt (VMware) * The state of the current task is too much unstable during
784e73e679fSSteven Rostedt (VMware) * suspend/restore to disk. We want to protect against that.
785e73e679fSSteven Rostedt (VMware) */
786e73e679fSSteven Rostedt (VMware) static int
ftrace_suspend_notifier_call(struct notifier_block * bl,unsigned long state,void * unused)787e73e679fSSteven Rostedt (VMware) ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
788e73e679fSSteven Rostedt (VMware) void *unused)
789e73e679fSSteven Rostedt (VMware) {
790e73e679fSSteven Rostedt (VMware) switch (state) {
791e73e679fSSteven Rostedt (VMware) case PM_HIBERNATION_PREPARE:
792e73e679fSSteven Rostedt (VMware) pause_graph_tracing();
793e73e679fSSteven Rostedt (VMware) break;
794e73e679fSSteven Rostedt (VMware)
795e73e679fSSteven Rostedt (VMware) case PM_POST_HIBERNATION:
796e73e679fSSteven Rostedt (VMware) unpause_graph_tracing();
797e73e679fSSteven Rostedt (VMware) break;
798e73e679fSSteven Rostedt (VMware) }
799e73e679fSSteven Rostedt (VMware) return NOTIFY_DONE;
800e73e679fSSteven Rostedt (VMware) }
801e73e679fSSteven Rostedt (VMware)
/* PM notifier pausing graph tracing around hibernation (see above). */
static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};
805e73e679fSSteven Rostedt (VMware)
/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
static inline unsigned long
__ftrace_return_to_handler(struct ftrace_regs *fregs, unsigned long frame_pointer)
{
	struct ftrace_ret_stack *ret_stack;
	struct ftrace_graph_ret trace;
	unsigned long bitmap;
	unsigned long ret;
	int offset;
	int i;

	ret_stack = ftrace_pop_return_trace(&trace, &ret, frame_pointer, &offset);

	if (unlikely(!ret_stack)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		return (unsigned long)panic;
	}

	/* Redirect the arch back to the original caller, not the trampoline. */
	if (fregs)
		ftrace_regs_set_instruction_pointer(fregs, ret);

#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	trace.retval = ftrace_regs_get_return_value(fregs);
#endif

	/* Which fgraph_array slots accepted this frame at entry time. */
	bitmap = get_bitmap_bits(current, offset);

#ifdef CONFIG_HAVE_STATIC_CALL
	/* Fast path: dispatch directly to the single fgraph_ops via static call. */
	if (static_branch_likely(&fgraph_do_direct)) {
		if (test_bit(fgraph_direct_gops->idx, &bitmap))
			static_call(fgraph_retfunc)(&trace, fgraph_direct_gops, fregs);
	} else
#endif
	{
		for_each_set_bit(i, &bitmap, sizeof(bitmap) * BITS_PER_BYTE) {
			struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]);

			if (gops == &fgraph_stub)
				continue;

			gops->retfunc(&trace, gops, fregs);
		}
	}

	/*
	 * The ftrace_graph_return() may still access the current
	 * ret_stack structure, we need to make sure the update of
	 * curr_ret_stack is after that.
	 */
	barrier();
	current->curr_ret_stack = offset - FGRAPH_FRAME_OFFSET;

	current->curr_ret_depth--;
	return ret;
}
866e73e679fSSteven Rostedt (VMware)
/*
 * After all architectures have selected HAVE_FUNCTION_GRAPH_FREGS, we can
 * leave only ftrace_return_to_handler(fregs).
 */
#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
/* Arch-facing return hook when the arch supplies full ftrace_regs. */
unsigned long ftrace_return_to_handler(struct ftrace_regs *fregs)
{
	return __ftrace_return_to_handler(fregs,
				ftrace_regs_get_frame_pointer(fregs));
}
#else
/* Arch-facing return hook when only the frame pointer is available. */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	return __ftrace_return_to_handler(NULL, frame_pointer);
}
#endif
883a1be9cccSDonglin Peng
88445fe439bSSteven Rostedt (VMware) /**
88545fe439bSSteven Rostedt (VMware) * ftrace_graph_get_ret_stack - return the entry of the shadow stack
8867aa1eaefSSteven Rostedt (VMware) * @task: The task to read the shadow stack from.
88745fe439bSSteven Rostedt (VMware) * @idx: Index down the shadow stack
88845fe439bSSteven Rostedt (VMware) *
88945fe439bSSteven Rostedt (VMware) * Return the ret_struct on the shadow stack of the @task at the
89045fe439bSSteven Rostedt (VMware) * call graph at @idx starting with zero. If @idx is zero, it
89145fe439bSSteven Rostedt (VMware) * will return the last saved ret_stack entry. If it is greater than
89245fe439bSSteven Rostedt (VMware) * zero, it will return the corresponding ret_stack for the depth
89345fe439bSSteven Rostedt (VMware) * of saved return addresses.
89445fe439bSSteven Rostedt (VMware) */
895b0e21a61SSteven Rostedt (VMware) struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct * task,int idx)896b0e21a61SSteven Rostedt (VMware) ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
897b0e21a61SSteven Rostedt (VMware) {
8987aa1eaefSSteven Rostedt (VMware) struct ftrace_ret_stack *ret_stack = NULL;
8997aa1eaefSSteven Rostedt (VMware) int offset = task->curr_ret_stack;
900b0e21a61SSteven Rostedt (VMware)
9017aa1eaefSSteven Rostedt (VMware) if (offset < 0)
902b0e21a61SSteven Rostedt (VMware) return NULL;
90342675b72SSteven Rostedt (VMware)
9047aa1eaefSSteven Rostedt (VMware) do {
9057aa1eaefSSteven Rostedt (VMware) ret_stack = get_ret_stack(task, offset, &offset);
9067aa1eaefSSteven Rostedt (VMware) } while (ret_stack && --idx >= 0);
9077aa1eaefSSteven Rostedt (VMware)
9087aa1eaefSSteven Rostedt (VMware) return ret_stack;
909b0e21a61SSteven Rostedt (VMware) }
910b0e21a61SSteven Rostedt (VMware)
91176b42b63SSteven Rostedt (VMware) /**
9120a6c61bcSMasami Hiramatsu (Google) * ftrace_graph_top_ret_addr - return the top return address in the shadow stack
9130a6c61bcSMasami Hiramatsu (Google) * @task: The task to read the shadow stack from.
9140a6c61bcSMasami Hiramatsu (Google) *
9150a6c61bcSMasami Hiramatsu (Google) * Return the first return address on the shadow stack of the @task, which is
9160a6c61bcSMasami Hiramatsu (Google) * not the fgraph's return_to_handler.
9170a6c61bcSMasami Hiramatsu (Google) */
ftrace_graph_top_ret_addr(struct task_struct * task)9180a6c61bcSMasami Hiramatsu (Google) unsigned long ftrace_graph_top_ret_addr(struct task_struct *task)
9190a6c61bcSMasami Hiramatsu (Google) {
9200a6c61bcSMasami Hiramatsu (Google) unsigned long return_handler = (unsigned long)dereference_kernel_function_descriptor(return_to_handler);
9210a6c61bcSMasami Hiramatsu (Google) struct ftrace_ret_stack *ret_stack = NULL;
9220a6c61bcSMasami Hiramatsu (Google) int offset = task->curr_ret_stack;
9230a6c61bcSMasami Hiramatsu (Google)
9240a6c61bcSMasami Hiramatsu (Google) if (offset < 0)
9250a6c61bcSMasami Hiramatsu (Google) return 0;
9260a6c61bcSMasami Hiramatsu (Google)
9270a6c61bcSMasami Hiramatsu (Google) do {
9280a6c61bcSMasami Hiramatsu (Google) ret_stack = get_ret_stack(task, offset, &offset);
9290a6c61bcSMasami Hiramatsu (Google) } while (ret_stack && ret_stack->ret == return_handler);
9300a6c61bcSMasami Hiramatsu (Google)
9310a6c61bcSMasami Hiramatsu (Google) return ret_stack ? ret_stack->ret : 0;
9320a6c61bcSMasami Hiramatsu (Google) }
9330a6c61bcSMasami Hiramatsu (Google)
/**
 * ftrace_graph_ret_addr - return the original value of the return address
 * @task: The task the unwinder is being executed on
 * @idx: An initialized pointer to the next stack index to use
 * @ret: The current return address (likely pointing to return_handler)
 * @retp: The address on the stack of the current return location
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address (@ret) to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of @ret is returned.
 *
 * @idx holds the last index used to know where to start from. It should be
 * initialized to zero for the first iteration as that will mean to start
 * at the top of the shadow stack. If the location is found, this pointer
 * will be assigned that location so that if called again, it will continue
 * where it left off.
 *
 * @retp is a pointer to the return address on the stack.
 */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	struct ftrace_ret_stack *ret_stack;
	unsigned long return_handler = (unsigned long)dereference_kernel_function_descriptor(return_to_handler);
	int i;

	if (ret != return_handler)
		return ret;

	if (!idx)
		return ret;

	/* GNU "?:": resume from the saved index, or start at the top of the stack. */
	i = *idx ? : task->curr_ret_stack;
	while (i > 0) {
		ret_stack = get_ret_stack(task, i, &i);
		if (!ret_stack)
			break;
		/*
		 * For the tail-call, there would be 2 or more ftrace_ret_stacks on
		 * the ret_stack, which records "return_to_handler" as the return
		 * address except for the last one.
		 * But on the real stack, there should be 1 entry because tail-call
		 * reuses the return address on the stack and jump to the next function.
		 * Thus we will continue to find real return address.
		 */
		if (ret_stack->retp == retp &&
		    ret_stack->ret != return_handler) {
			*idx = i;
			return ret_stack->ret;
		}
	}

	return ret;
}
98976b42b63SSteven Rostedt (VMware)
/*
 * The single ftrace_ops used to hook function entry for the function graph
 * tracer. All registered fgraph_ops are managed as subops of this one.
 */
static struct ftrace_ops graph_ops = {
	.func = ftrace_graph_func,
	.flags = FTRACE_OPS_GRAPH_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline = FTRACE_GRAPH_TRAMP_ADDR,
	/* trampoline_size is only needed for dynamically allocated tramps */
#endif
};
998e73e679fSSteven Rostedt (VMware)
/*
 * Initialize @dst_ops as a function-graph stub ops, optionally sharing the
 * filter hash of @src_ops so both trace the same set of functions.
 */
void fgraph_init_ops(struct ftrace_ops *dst_ops,
		     struct ftrace_ops *src_ops)
{
	dst_ops->flags = FTRACE_OPS_FL_PID | FTRACE_OPS_GRAPH_STUB;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (src_ops) {
		/* Share the source ops' filter hash instead of duplicating it. */
		dst_ops->func_hash = &src_ops->local_hash;
		mutex_init(&dst_ops->local_hash.regex_lock);
		INIT_LIST_HEAD(&dst_ops->subop_list);
		/* Mark initialized so the regex_lock is not re-initialized later. */
		dst_ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}
1013c132be2cSSteven Rostedt (VMware)
/*
 * Control whether time spent sleeping is counted in a function's
 * graph-tracer duration (see ftrace_graph_probe_sched_switch()).
 */
void ftrace_graph_sleep_time_control(bool enable)
{
	fgraph_sleep_time = enable;
}
1018e73e679fSSteven Rostedt (VMware)
/*
 * Simply points to ftrace_stub, but with the proper protocol.
 * Defined by the linker script in linux/vmlinux.lds.h
 */
void ftrace_stub_graph(struct ftrace_graph_ret *trace, struct fgraph_ops *gops,
		       struct ftrace_regs *fregs);

/* The callbacks that hook a function */
/* Both default to stubs until a function graph tracer is registered. */
trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
1029e73e679fSSteven Rostedt (VMware)
1030e73e679fSSteven Rostedt (VMware) /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
/*
 * Gives a shadow ret_stack to up to FTRACE_RETSTACK_ALLOC_SIZE tasks that
 * do not yet have one. Returns 0 when all tasks are covered, -EAGAIN when
 * more tasks remain (callers loop until success), or -ENOMEM on failure.
 * @ret_stack_list: scratch array holding the pre-allocated stacks.
 */
static int alloc_retstack_tasklist(unsigned long **ret_stack_list)
{
	int i;
	int ret = 0;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	if (WARN_ON_ONCE(!fgraph_stack_cachep))
		return -ENOMEM;

	/* Pre-allocate everything first: cannot sleep inside the RCU walk below. */
	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	rcu_read_lock();
	for_each_process_thread(g, t) {
		if (start == end) {
			/* Used up all pre-allocated stacks; let the caller retry. */
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->trace_overrun, 0);
			ret_stack_init_task_vars(ret_stack_list[start]);
			t->curr_ret_stack = 0;
			t->curr_ret_depth = -1;
			/* Make sure the tasks see the 0 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	}

unlock:
	rcu_read_unlock();
free:
	/* Release any stacks that were not handed to a task. */
	for (i = start; i < end; i++)
		kmem_cache_free(fgraph_stack_cachep, ret_stack_list[i]);
	return ret;
}
1076e73e679fSSteven Rostedt (VMware)
/*
 * sched_switch tracepoint probe: accumulate the time a task spent
 * scheduled out, so that sleep time can be subtracted from function
 * durations when fgraph_sleep_time is disabled.
 */
static void
ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned int prev_state)
{
	unsigned long long timestamp;

	/*
	 * Does the user want to count the time a function was asleep.
	 * If so, do not update the time stamps.
	 */
	if (fgraph_sleep_time)
		return;

	timestamp = trace_clock_local();

	/* Record when @prev was switched out. */
	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/* Add the time @next spent off-CPU to its running sleep total. */
	next->ftrace_sleeptime += timestamp - next->ftrace_timestamp;
}
1102e73e679fSSteven Rostedt (VMware)
/* Per-CPU shadow stack for the idle task; never freed once allocated. */
static DEFINE_PER_CPU(unsigned long *, idle_ret_stack);

/*
 * Initialize @t's graph-tracer state and publish @ret_stack as its
 * shadow stack.
 */
static void
graph_init_task(struct task_struct *t, unsigned long *ret_stack)
{
	atomic_set(&t->trace_overrun, 0);
	ret_stack_init_task_vars(ret_stack);
	t->ftrace_timestamp = 0;
	t->curr_ret_stack = 0;
	t->curr_ret_depth = -1;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}
1117e73e679fSSteven Rostedt (VMware)
1118e73e679fSSteven Rostedt (VMware) /*
1119e73e679fSSteven Rostedt (VMware) * Allocate a return stack for the idle task. May be the first
1120e73e679fSSteven Rostedt (VMware) * time through, or it may be done by CPU hotplug online.
1121e73e679fSSteven Rostedt (VMware) */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = 0;
	t->curr_ret_depth = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		unsigned long *ret_stack;

		/* The cache is created at register time; it must exist here. */
		if (WARN_ON_ONCE(!fgraph_stack_cachep))
			return;

		/* Reuse the per-CPU stack across hotplug; allocate only once. */
		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}
1149e73e679fSSteven Rostedt (VMware)
1150e73e679fSSteven Rostedt (VMware) /* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = 0;
	t->curr_ret_depth = -1;

	/* Only allocate a shadow stack while the graph tracer is in use. */
	if (ftrace_graph_active) {
		unsigned long *ret_stack;

		if (WARN_ON_ONCE(!fgraph_stack_cachep))
			return;

		/* Allocation failure just leaves this task untraced. */
		ret_stack = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}
1170e73e679fSSteven Rostedt (VMware)
/* Free a task's shadow ret_stack when the task exits. */
void ftrace_graph_exit_task(struct task_struct *t)
{
	unsigned long *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	if (ret_stack) {
		if (WARN_ON_ONCE(!fgraph_stack_cachep))
			return;
		kmem_cache_free(fgraph_stack_cachep, ret_stack);
	}
}
1185e73e679fSSteven Rostedt (VMware)
11864267fda4SSteven Rostedt (Google) #ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Entry wrapper used when PID filtering is enabled: skip the trace
 * (return 0) unless the current task matches the filtered PID, then
 * forward to the originally registered entry function.
 */
static int fgraph_pid_func(struct ftrace_graph_ent *trace,
			   struct fgraph_ops *gops,
			   struct ftrace_regs *fregs)
{
	struct trace_array *tr = gops->ops.private;
	int pid;

	if (tr) {
		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
		if (pid == FTRACE_PID_IGNORE)
			return 0;
		/* FTRACE_PID_TRACE means "trace everything" on this CPU. */
		if (pid != FTRACE_PID_TRACE &&
		    pid != current->pid)
			return 0;
	}

	return gops->saved_func(trace, gops, fregs);
}
1205df3ec5daSSteven Rostedt (Google)
/*
 * Called when the set of filtered PIDs changes: switch every PID-aware
 * fgraph_ops between the pid-filtering wrapper and its original entry
 * function.
 */
void fgraph_update_pid_func(void)
{
	struct fgraph_ops *gops;
	struct ftrace_ops *op;

	/* Nothing registered yet; subop_list is not valid. */
	if (!(graph_ops.flags & FTRACE_OPS_FL_INITIALIZED))
		return;

	list_for_each_entry(op, &graph_ops.subop_list, list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			gops = container_of(op, struct fgraph_ops, ops);
			gops->entryfunc = ftrace_pids_enabled(op) ?
				fgraph_pid_func : gops->saved_func;
			/* With a single user, the direct static call must follow. */
			if (ftrace_graph_active == 1)
				static_call_update(fgraph_func, gops->entryfunc);
		}
	}
}
12244267fda4SSteven Rostedt (Google) #endif
1225df3ec5daSSteven Rostedt (Google)
1226e73e679fSSteven Rostedt (VMware) /* Allocate a return stack for each task */
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	unsigned long **ret_stack_list;
	int ret, cpu;

	/* Scratch array of pre-allocated stacks handed out per batch. */
	ret_stack_list = kcalloc(FTRACE_RETSTACK_ALLOC_SIZE,
				 sizeof(*ret_stack_list), GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	/* -EAGAIN means more tasks need stacks than one batch covered. */
	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		/* Hook sched_switch to account for time spent sleeping. */
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}
1258e73e679fSSteven Rostedt (VMware)
/*
 * Reset the per-task shadow-stack variable for slot @idx on every task
 * (including idle tasks) that already has a ret_stack. Used when a new
 * fgraph user registers while tracing is already active.
 */
static void init_task_vars(int idx)
{
	struct task_struct *g, *t;
	int cpu;

	/* Idle tasks are not on the tasklist; handle them per CPU. */
	for_each_online_cpu(cpu) {
		if (idle_task(cpu)->ret_stack)
			ret_stack_set_task_var(idle_task(cpu), idx, 0);
	}

	read_lock(&tasklist_lock);
	for_each_process_thread(g, t) {
		if (t->ret_stack)
			ret_stack_set_task_var(t, idx, 0);
	}
	read_unlock(&tasklist_lock);
}
12764497412aSSteven Rostedt (VMware)
/*
 * Set up the single-user "direct" fast path: point the fgraph static
 * calls at the lone user's entry/return functions so the generic
 * multiplexing loop is bypassed.
 *
 * @enable_branch: also flip the fgraph_do_direct static branch on (set
 *	when dropping from multiple users back down to one; on first
 *	registration the branch is already in the desired state).
 * @gops: the single remaining user, or NULL to look it up from
 *	fgraph_array_bitmask.
 */
static void ftrace_graph_enable_direct(bool enable_branch, struct fgraph_ops *gops)
{
	trace_func_graph_ent_t func = NULL;
	trace_func_graph_ret_t retfunc = NULL;
	int i;

	if (gops) {
		func = gops->entryfunc;
		retfunc = gops->retfunc;
		fgraph_direct_gops = gops;
	} else {
		/* Exactly one bit should be set; the loop finds it. */
		for_each_set_bit(i, &fgraph_array_bitmask,
				 sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) {
			func = fgraph_array[i]->entryfunc;
			retfunc = fgraph_array[i]->retfunc;
			fgraph_direct_gops = fgraph_array[i];
		}
	}
	if (WARN_ON_ONCE(!func))
		return;

	static_call_update(fgraph_func, func);
	static_call_update(fgraph_retfunc, retfunc);
	/*
	 * Fix: this path must *enable* the direct branch. The previous code
	 * called static_branch_disable() here, which left the fast path
	 * permanently off when returning to a single user.
	 */
	if (enable_branch)
		static_branch_enable(&fgraph_do_direct);
}
1303cc60ee81SSteven Rostedt (Google)
/*
 * Tear down the single-user "direct" fast path: restore the static calls
 * to the stubs and, if @disable_branch, turn off the fgraph_do_direct
 * static branch so callers go through the generic loop again.
 */
static void ftrace_graph_disable_direct(bool disable_branch)
{
	if (disable_branch)
		static_branch_disable(&fgraph_do_direct);
	static_call_update(fgraph_func, ftrace_graph_entry_stub);
	static_call_update(fgraph_retfunc, ftrace_graph_ret_stub);
	fgraph_direct_gops = &fgraph_stub;
}
1312cc60ee81SSteven Rostedt (Google)
/* The cpu_boot init_task->ret_stack will never be freed */
/* CPU-hotplug online callback: give the new CPU's idle task a shadow stack. */
static int fgraph_cpu_init(unsigned int cpu)
{
	if (!idle_task(cpu)->ret_stack)
		ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	return 0;
}
13202c02f737SSteven Rostedt
/**
 * register_ftrace_graph - register a function graph tracer
 * @gops: the fgraph_ops with entryfunc/retfunc callbacks to register
 *
 * Allocates a slot in fgraph_array for @gops, sets up the shadow-stack
 * cache, CPU-hotplug hook and per-task stacks on first use, and starts
 * function return tracing.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOSPC when all
 * fgraph slots are taken, or another negative errno from setup.
 */
int register_ftrace_graph(struct fgraph_ops *gops)
{
	static bool fgraph_initialized;
	int command = 0;
	int ret = 0;
	int i = -1;

	guard(mutex)(&ftrace_lock);

	/* Created once and kept for the lifetime of the kernel. */
	if (!fgraph_stack_cachep) {
		fgraph_stack_cachep = kmem_cache_create("fgraph_stack",
							SHADOW_STACK_SIZE,
							SHADOW_STACK_SIZE, 0, NULL);
		if (!fgraph_stack_cachep)
			return -ENOMEM;
	}

	/* One-time hotplug hook so new CPUs' idle tasks get shadow stacks. */
	if (!fgraph_initialized) {
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "fgraph:online",
					fgraph_cpu_init, NULL);
		if (ret < 0) {
			pr_warn("fgraph: Error to init cpu hotplug support\n");
			return ret;
		}
		fgraph_initialized = true;
		ret = 0;
	}

	if (!fgraph_array[0]) {
		/* The array must always have real data on it */
		for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
			fgraph_array[i] = &fgraph_stub;
		fgraph_lru_init();
	}

	i = fgraph_lru_alloc_index();
	if (i < 0 || WARN_ON_ONCE(fgraph_array[i] != &fgraph_stub))
		return -ENOSPC;
	gops->idx = i;

	ftrace_graph_active++;

	/* Second user: the single-user direct fast path no longer applies. */
	if (ftrace_graph_active == 2)
		ftrace_graph_disable_direct(true);

	if (ftrace_graph_active == 1) {
		ftrace_graph_enable_direct(false, gops);
		register_pm_notifier(&ftrace_suspend_notifier);
		ret = start_graph_tracing();
		if (ret)
			goto error;
		/*
		 * Some archs just test to see if these are not
		 * the default function
		 */
		ftrace_graph_return = return_run;
		ftrace_graph_entry = entry_run;
		command = FTRACE_START_FUNC_RET;
	} else {
		/* Tracing already running; just initialize this user's slot. */
		init_task_vars(gops->idx);
	}
	/* Always save the function, and reset at unregistering */
	gops->saved_func = gops->entryfunc;

	ret = ftrace_startup_subops(&graph_ops, &gops->ops, command);
	/* Publish @gops only after the subops started successfully. */
	if (!ret)
		fgraph_array[i] = gops;

error:
	if (ret) {
		/* Unwind: drop the active count and release the slot. */
		ftrace_graph_active--;
		gops->saved_func = NULL;
		fgraph_lru_release_index(i);
	}
	return ret;
}
1397e73e679fSSteven Rostedt (VMware)
/**
 * unregister_ftrace_graph - unregister a function graph tracer
 * @gops: the fgraph_ops previously passed to register_ftrace_graph()
 *
 * Removes @gops from fgraph_array and, when no users remain, stops
 * function return tracing entirely. Silently returns if @gops is not
 * currently registered.
 */
void unregister_ftrace_graph(struct fgraph_ops *gops)
{
	int command = 0;

	guard(mutex)(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		return;

	/* Sanity-check that @gops really owns its claimed slot. */
	if (unlikely(gops->idx < 0 || gops->idx >= FGRAPH_ARRAY_SIZE ||
		     fgraph_array[gops->idx] != gops))
		return;

	if (fgraph_lru_release_index(gops->idx) < 0)
		return;

	fgraph_array[gops->idx] = &fgraph_stub;

	ftrace_graph_active--;

	if (!ftrace_graph_active)
		command = FTRACE_STOP_FUNC_RET;

	ftrace_shutdown_subops(&graph_ops, &gops->ops, command);

	/* Back down to one user: re-enable the direct fast path. */
	if (ftrace_graph_active == 1)
		ftrace_graph_enable_direct(true, NULL);
	else if (!ftrace_graph_active)
		ftrace_graph_disable_direct(false);

	if (!ftrace_graph_active) {
		/* Last user gone: restore the stubs and drop the hooks. */
		ftrace_graph_return = ftrace_stub_graph;
		ftrace_graph_entry = ftrace_graph_entry_stub;
		unregister_pm_notifier(&ftrace_suspend_notifier);
		unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
	}
	gops->saved_func = NULL;
}
1436