// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure to hook into function calls and returns.
 * Copyright (c) 2008-2009 Frederic Weisbecker <[email protected]>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <[email protected]>
 *
 * Highly modified by Steven Rostedt (VMware).
 */
#include <linux/bits.h>
#include <linux/jump_label.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/static_call.h>
#include <linux/slab.h>

#include <trace/events/sched.h>

#include "ftrace_internal.h"
#include "trace.h"

/*
 * FGRAPH_FRAME_SIZE:	Size in bytes of the meta data on the shadow stack
 * FGRAPH_FRAME_OFFSET:	Size in long words of the meta data frame
 */
#define FGRAPH_FRAME_SIZE	sizeof(struct ftrace_ret_stack)
#define FGRAPH_FRAME_OFFSET	DIV_ROUND_UP(FGRAPH_FRAME_SIZE, sizeof(long))

/*
 * On entry to a function (via function_graph_enter()), a new fgraph frame
 * (ftrace_ret_stack) is pushed onto the stack as well as a word that
 * holds a bitmask and a type (called "bitmap"). The bitmap is defined as:
 *
 * bits:  0 -  9	offset in words from the previous ftrace_ret_stack
 *
 * bits: 10 - 11	Type of storage
 *			  0 - reserved
 *			  1 - bitmap of fgraph_array index
 *			  2 - reserved data
 *
 * For type with "bitmap of fgraph_array index" (FGRAPH_TYPE_BITMAP):
 *  bits: 12 - 27	The bitmap of fgraph_ops fgraph_array index
 *			That is, it's a bitmask of 0-15 (16 bits)
 *			where, if a corresponding ops in the fgraph_array[]
 *			expects a callback from the return of the function,
 *			its corresponding bit will be set.
 *
 *
 * The top of the ret_stack (when not empty) will always have a reference
 * word that points to the last fgraph frame that was saved.
 *
 * For reserved data:
 *  bits: 12 - 16	The size in words that is stored
 *  bits: 17 - 20	The index of fgraph_array, which shows who is stored
 *
 * That is, at the end of function_graph_enter(), if the first and fourth
 * fgraph_ops on the fgraph_array[] (index 0 and 3) need their retfunc called
 * on the return of the function being traced, and the fourth fgraph_ops
 * stored two words of data, this is what will be on the task's shadow
 * ret_stack: (the stack grows upward)
 *
 *  ret_stack[SHADOW_STACK_OFFSET]
 * | SHADOW_STACK_TASK_VARS(ret_stack)[15]      |
 * ...
 * | SHADOW_STACK_TASK_VARS(ret_stack)[0]       |
 *  ret_stack[SHADOW_STACK_MAX_OFFSET]
 * ...
 * |                                            | <- task->curr_ret_stack
 * +--------------------------------------------+
 * | (3 << 17) | (1 << 12) | (2 << 10) | FGRAPH_FRAME_OFFSET+3 |
 * |         *or put another way*               |
 * | (3 << FGRAPH_DATA_INDEX_SHIFT)| \          | This is for fgraph_ops[3].
 * | ((2 - 1) << FGRAPH_DATA_SHIFT)| \          | The data size is 2 words.
 * | (FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT)| \ |
 * | (offset2:FGRAPH_FRAME_OFFSET+3)            | <- the offset2 is from here
 * +--------------------------------------------+ ( It is 4 words from the ret_stack)
 * |            STORED DATA WORD 2              |
 * |            STORED DATA WORD 1              |
 * +--------------------------------------------+
 * | (9 << 12) | (1 << 10) | FGRAPH_FRAME_OFFSET|
 * |         *or put another way*               |
 * | (BIT(3)|BIT(0)) << FGRAPH_INDEX_SHIFT      | \ |
 * | FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT    | \ |
 * | (offset1:FGRAPH_FRAME_OFFSET)              | <- the offset1 is from here
 * +--------------------------------------------+
 * | struct ftrace_ret_stack                    |
 * |   (stores the saved ret pointer)           | <- the offset points here
 * +--------------------------------------------+
 * | (X) | (N)                                  | ( N words away from
 * |                                            |   previous ret_stack)
 * ...
 * ret_stack[0]
 *
 * If a backtrace is required, and the real return pointer needs to be
 * fetched, then it looks at the task's curr_ret_stack offset; if it
 * is greater than zero (reserved, or right before popped), it would mask
 * the value by FGRAPH_FRAME_OFFSET_MASK to get the offset of the
 * ftrace_ret_stack structure stored on the shadow stack.
 */

/*
 * The following is for the top word on the stack:
 *
 *   FGRAPH_FRAME_OFFSET (0-9) holds the offset delta to the fgraph frame
 *   FGRAPH_TYPE (10-11) holds the type of word this is.
 *     (RESERVED, BITMAP or DATA)
 */
#define FGRAPH_FRAME_OFFSET_BITS	10
#define FGRAPH_FRAME_OFFSET_MASK	GENMASK(FGRAPH_FRAME_OFFSET_BITS - 1, 0)

#define FGRAPH_TYPE_BITS	2
#define FGRAPH_TYPE_MASK	GENMASK(FGRAPH_TYPE_BITS - 1, 0)
#define FGRAPH_TYPE_SHIFT	FGRAPH_FRAME_OFFSET_BITS

enum {
	FGRAPH_TYPE_RESERVED	= 0,
	FGRAPH_TYPE_BITMAP	= 1,
	FGRAPH_TYPE_DATA	= 2,
};

/*
 * For BITMAP type:
 *   FGRAPH_INDEX (12-27) bits hold the bitmap of gops indexes that want
 *   their return callback called
 */
#define FGRAPH_INDEX_BITS	16
#define FGRAPH_INDEX_MASK	GENMASK(FGRAPH_INDEX_BITS - 1, 0)
#define FGRAPH_INDEX_SHIFT	(FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS)

/*
 * For DATA type:
 *   FGRAPH_DATA (12-16) bits hold the size of data (in words)
 *   FGRAPH_INDEX (17-20) bits hold the index for which gops->idx the data is for
 *
 * Note:
 *   data_size == 0 means 1 word, and 31 (=2^5 - 1) means 32 words.
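 *
 * For example (purely illustrative, mirroring make_data_type_val() below):
 * a DATA word for fgraph_array[3] that reserved two words of data, with its
 * ftrace_ret_stack FGRAPH_FRAME_OFFSET + 3 words below it, would be:
 *
 *   (3 << FGRAPH_DATA_INDEX_SHIFT) | ((2 - 1) << FGRAPH_DATA_SHIFT) |
 *   (FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT) | (FGRAPH_FRAME_OFFSET + 3)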
 */
#define FGRAPH_DATA_BITS	5
#define FGRAPH_DATA_MASK	GENMASK(FGRAPH_DATA_BITS - 1, 0)
#define FGRAPH_DATA_SHIFT	(FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS)
#define FGRAPH_MAX_DATA_SIZE	(sizeof(long) * (1 << FGRAPH_DATA_BITS))

#define FGRAPH_DATA_INDEX_BITS	4
#define FGRAPH_DATA_INDEX_MASK	GENMASK(FGRAPH_DATA_INDEX_BITS - 1, 0)
#define FGRAPH_DATA_INDEX_SHIFT	(FGRAPH_DATA_SHIFT + FGRAPH_DATA_BITS)

#define FGRAPH_MAX_INDEX	\
	((FGRAPH_INDEX_SIZE << FGRAPH_DATA_BITS) + FGRAPH_RET_INDEX)

#define FGRAPH_ARRAY_SIZE	FGRAPH_INDEX_BITS

/*
 * SHADOW_STACK_SIZE:		The size in bytes of the entire shadow stack
 * SHADOW_STACK_OFFSET:		The size in long words of the shadow stack
 * SHADOW_STACK_MAX_OFFSET:	The max offset of the stack for a new frame to be added
 */
#define SHADOW_STACK_SIZE	(PAGE_SIZE)
#define SHADOW_STACK_OFFSET	(SHADOW_STACK_SIZE / sizeof(long))
/* Leave a buffer at the end */
#define SHADOW_STACK_MAX_OFFSET				\
	(SHADOW_STACK_OFFSET - (FGRAPH_FRAME_OFFSET + 1 + FGRAPH_ARRAY_SIZE))

/* RET_STACK(): Return the frame from a given @offset from task @t */
#define RET_STACK(t, offset) ((struct ftrace_ret_stack *)(&(t)->ret_stack[offset]))

/*
 * Each fgraph_ops has a reserved unsigned long at the end (top) of the
 * ret_stack to store task specific state.
 */
#define SHADOW_STACK_TASK_VARS(ret_stack) \
	((unsigned long *)(&(ret_stack)[SHADOW_STACK_OFFSET - FGRAPH_ARRAY_SIZE]))

DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
int ftrace_graph_active;

static struct fgraph_ops *fgraph_array[FGRAPH_ARRAY_SIZE];
static unsigned long fgraph_array_bitmask;

/* LRU index table for fgraph_array */
static int fgraph_lru_table[FGRAPH_ARRAY_SIZE];
static int fgraph_lru_next;
static int fgraph_lru_last;

/* Initialize fgraph_lru_table with unused index */
static void fgraph_lru_init(void)
{
	int i;

	for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
		fgraph_lru_table[i] = i;
}

/* Release the used index back to the LRU table */
static int fgraph_lru_release_index(int idx)
{
	if (idx < 0 || idx >= FGRAPH_ARRAY_SIZE ||
	    WARN_ON_ONCE(fgraph_lru_table[fgraph_lru_last] != -1))
		return -1;

	fgraph_lru_table[fgraph_lru_last] = idx;
	fgraph_lru_last = (fgraph_lru_last + 1) % FGRAPH_ARRAY_SIZE;

	clear_bit(idx, &fgraph_array_bitmask);
	return 0;
}

/* Allocate a new index from the LRU table */
static int fgraph_lru_alloc_index(void)
{
	int idx = fgraph_lru_table[fgraph_lru_next];

	/* No id is available */
	if (idx == -1)
		return -1;

	fgraph_lru_table[fgraph_lru_next] = -1;
	fgraph_lru_next = (fgraph_lru_next + 1) % FGRAPH_ARRAY_SIZE;

	set_bit(idx, &fgraph_array_bitmask);
	return idx;
}

/* Get the offset to the fgraph frame from a ret_stack value */
static inline int __get_offset(unsigned long val)
{
	return val & FGRAPH_FRAME_OFFSET_MASK;
}

/* Get the type of word from a ret_stack value */
static inline int __get_type(unsigned long val)
{
	return (val >> FGRAPH_TYPE_SHIFT) & FGRAPH_TYPE_MASK;
}

/* Get the data_index for a DATA type ret_stack word */
static inline int __get_data_index(unsigned long val)
{
	return (val >> FGRAPH_DATA_INDEX_SHIFT) & FGRAPH_DATA_INDEX_MASK;
}
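
/*
 * Illustrative example (not part of this file): for the DATA word shown
 * in the layout comment above (fgraph_array[3], two words of data), the
 * helpers here and __get_data_size() below would decode it as:
 *
 *	__get_type(val)       == FGRAPH_TYPE_DATA
 *	__get_data_index(val) == 3
 *	__get_data_size(val)  == 2	(the field stores size - 1)
 *	__get_offset(val)     == the distance in words back to the
 *				 frame's ftrace_ret_stack
 */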

/* Get the data_size for a DATA type ret_stack word */
static inline int __get_data_size(unsigned long val)
{
	return ((val >> FGRAPH_DATA_SHIFT) & FGRAPH_DATA_MASK) + 1;
}

/* Get the word from the ret_stack at @offset */
static inline unsigned long get_fgraph_entry(struct task_struct *t, int offset)
{
	return t->ret_stack[offset];
}

/* Get the FRAME_OFFSET from the word at @offset on the ret_stack */
static inline int get_frame_offset(struct task_struct *t, int offset)
{
	return __get_offset(t->ret_stack[offset]);
}

/* For BITMAP type: get the bitmask from the @offset at ret_stack */
static inline unsigned long
get_bitmap_bits(struct task_struct *t, int offset)
{
	return (t->ret_stack[offset] >> FGRAPH_INDEX_SHIFT) & FGRAPH_INDEX_MASK;
}

/* Write the bitmap to the ret_stack at @offset (does index, offset and bitmask) */
static inline void
set_bitmap(struct task_struct *t, int offset, unsigned long bitmap)
{
	t->ret_stack[offset] = (bitmap << FGRAPH_INDEX_SHIFT) |
		(FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET;
}

/* For DATA type: get the data saved under the ret_stack word at @offset */
static inline void *get_data_type_data(struct task_struct *t, int offset)
{
	unsigned long val = t->ret_stack[offset];

	if (__get_type(val) != FGRAPH_TYPE_DATA)
		return NULL;
	offset -= __get_data_size(val);
	return (void *)&t->ret_stack[offset];
}

/* Create the ret_stack word for a DATA type */
static inline unsigned long make_data_type_val(int idx, int size, int offset)
{
	return (idx << FGRAPH_DATA_INDEX_SHIFT) |
		((size - 1) << FGRAPH_DATA_SHIFT) |
		(FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT) | offset;
}

/* ftrace_graph_entry set to this to tell some archs to run function graph */
static int entry_run(struct ftrace_graph_ent *trace, struct fgraph_ops *ops)
{
	return 0;
}

/* ftrace_graph_return set to this to tell some archs to run function graph */
static void return_run(struct ftrace_graph_ret *trace, struct fgraph_ops *ops)
{
}

static void ret_stack_set_task_var(struct task_struct *t, int idx, long val)
{
	unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);

	gvals[idx] = val;
}

static unsigned long *
ret_stack_get_task_var(struct task_struct *t, int idx)
{
	unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);

	return &gvals[idx];
}

static void ret_stack_init_task_vars(unsigned long *ret_stack)
{
	unsigned long *gvals = SHADOW_STACK_TASK_VARS(ret_stack);

	memset(gvals, 0, sizeof(*gvals) * FGRAPH_ARRAY_SIZE);
}

/**
 * fgraph_reserve_data - Reserve storage on the task's ret_stack
 * @idx:	The index of fgraph_array
 * @size_bytes:	The size in bytes to reserve
 *
 * Reserves space of up to FGRAPH_MAX_DATA_SIZE bytes on the
 * task's ret_stack shadow stack, for a given fgraph_ops during
 * the entryfunc() call. If entryfunc() returns zero, the storage
 * is discarded. An entryfunc() can only call this once per iteration.
 * The fgraph_ops retfunc() can retrieve this stored data with
 * fgraph_retrieve_data().
 *
 * Returns: On success, a pointer to the data on the stack.
 *   Otherwise, NULL if there's not enough space left on the
 *   ret_stack for the data, or if fgraph_reserve_data() was called
 *   more than once for a single entryfunc() call.
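 *
 * Example (an illustrative sketch only; struct fgraph_data and the
 * callback name are hypothetical, not part of this file):
 *
 *	struct fgraph_data { unsigned long long start; };
 *
 *	static int my_entry(struct ftrace_graph_ent *trace,
 *			    struct fgraph_ops *gops)
 *	{
 *		struct fgraph_data *data;
 *
 *		data = fgraph_reserve_data(gops->idx, sizeof(*data));
 *		if (!data)
 *			return 0;	// no room, skip this function
 *		data->start = trace_clock_local();
 *		return 1;
 *	}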
 */
void *fgraph_reserve_data(int idx, int size_bytes)
{
	unsigned long val;
	void *data;
	int curr_ret_stack = current->curr_ret_stack;
	int data_size;

	if (size_bytes > FGRAPH_MAX_DATA_SIZE)
		return NULL;

	/* Convert the data size to number of longs. */
	data_size = (size_bytes + sizeof(long) - 1) >> (sizeof(long) == 4 ? 2 : 3);

	val = get_fgraph_entry(current, curr_ret_stack - 1);
	data = &current->ret_stack[curr_ret_stack];

	curr_ret_stack += data_size + 1;
	if (unlikely(curr_ret_stack >= SHADOW_STACK_MAX_OFFSET))
		return NULL;

	val = make_data_type_val(idx, data_size, __get_offset(val) + data_size + 1);

	/* Set the last word to be reserved */
	current->ret_stack[curr_ret_stack - 1] = val;

	/* Make sure interrupts see this */
	barrier();
	current->curr_ret_stack = curr_ret_stack;
	/* Again sync with interrupts, and reset reserve */
	current->ret_stack[curr_ret_stack - 1] = val;

	return data;
}

/**
 * fgraph_retrieve_data - Retrieve stored data from fgraph_reserve_data()
 * @idx:	the index of fgraph_array (fgraph_ops::idx)
 * @size_bytes:	pointer to retrieved data size.
 *
 * This is to be called by a fgraph_ops retfunc(), to retrieve data that
 * was stored by the fgraph_ops entryfunc() on the function entry.
 * That is, this will retrieve the data that was reserved on the
 * entry of the function that corresponds to the exit of the function
 * that the fgraph_ops retfunc() is called on.
 *
 * Returns: The stored data from fgraph_reserve_data() called by the
 *   matching entryfunc() for the retfunc() this is called from.
 *   Or NULL if there was nothing stored.
 */
void *fgraph_retrieve_data(int idx, int *size_bytes)
{
	return fgraph_retrieve_parent_data(idx, size_bytes, 0);
}

/**
 * fgraph_get_task_var - retrieve a task specific state variable
 * @gops: The ftrace_ops that owns the task specific variable
 *
 * Every registered fgraph_ops has a task state variable
 * reserved on the task's ret_stack. This function returns the
 * address to that variable.
 *
 * Returns the address of the fgraph_ops @gops task specific
 * unsigned long variable.
 */
unsigned long *fgraph_get_task_var(struct fgraph_ops *gops)
{
	return ret_stack_get_task_var(current, gops->idx);
}
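
/*
 * Illustrative sketch (not part of this file): a fgraph_ops could use its
 * per-task variable as, say, a nesting counter from its entryfunc():
 *
 *	static int my_count_entry(struct ftrace_graph_ent *trace,
 *				  struct fgraph_ops *gops)
 *	{
 *		unsigned long *nested = fgraph_get_task_var(gops);
 *
 *		(*nested)++;
 *		return 1;
 *	}
 *
 * The variable lives at the top of the task's shadow ret_stack (see
 * SHADOW_STACK_TASK_VARS() above) and is zeroed when the stack is set up.
 */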

/*
 * @offset: The offset into @t->ret_stack to find the ret_stack entry
 * @frame_offset: Where to place the offset into @t->ret_stack of that entry
 *
 * Returns a pointer to the previous ret_stack below @offset or NULL
 *   when it reaches the bottom of the stack.
 *
 * Calling this with:
 *
 *   offset = task->curr_ret_stack;
 *   do {
 *	ret_stack = get_ret_stack(task, offset, &offset);
 *   } while (ret_stack);
 *
 * Will iterate through all the ret_stack entries from curr_ret_stack
 * down to the first one.
 */
static inline struct ftrace_ret_stack *
get_ret_stack(struct task_struct *t, int offset, int *frame_offset)
{
	int offs;

	BUILD_BUG_ON(FGRAPH_FRAME_SIZE % sizeof(long));

	if (unlikely(offset <= 0))
		return NULL;

	offs = get_frame_offset(t, --offset);
	if (WARN_ON_ONCE(offs <= 0 || offs > offset))
		return NULL;

	offset -= offs;

	*frame_offset = offset;
	return RET_STACK(t, offset);
}

/**
 * fgraph_retrieve_parent_data - get data from a parent function
 * @idx:	The index into the fgraph_array (fgraph_ops::idx)
 * @size_bytes:	A pointer to retrieved data size
 * @depth:	The depth to find the parent (0 is the current function)
 *
 * This is similar to fgraph_retrieve_data() but can be used to retrieve
 * data from a parent caller function.
 *
 * Return: a pointer to the specified parent data or NULL if not found
 */
void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth)
{
	struct ftrace_ret_stack *ret_stack = NULL;
	int offset = current->curr_ret_stack;
	unsigned long val;

	if (offset <= 0)
		return NULL;

	for (;;) {
		int next_offset;

		ret_stack = get_ret_stack(current, offset, &next_offset);
		if (!ret_stack || --depth < 0)
			break;
		offset = next_offset;
	}

	if (!ret_stack)
		return NULL;

	offset--;

	val = get_fgraph_entry(current, offset);
	while (__get_type(val) == FGRAPH_TYPE_DATA) {
		if (__get_data_index(val) == idx)
			goto found;
		offset -= __get_data_size(val) + 1;
		val = get_fgraph_entry(current, offset);
	}
	return NULL;
found:
	if (size_bytes)
		*size_bytes = __get_data_size(val) * sizeof(long);
	return get_data_type_data(current, offset);
}

/* Enabled by default (can be cleared by function_graph tracer flags) */
bool fgraph_sleep_time = true;

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * archs can override this function if they must do something
 * to enable the hook for the graph tracer.
 */
int __weak ftrace_enable_ftrace_graph_caller(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * to disable the hook for the graph tracer.
 */
int __weak ftrace_disable_ftrace_graph_caller(void)
{
	return 0;
}
#endif

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
			    struct fgraph_ops *gops)
{
	return 0;
}

static void ftrace_graph_ret_stub(struct ftrace_graph_ret *trace,
				  struct fgraph_ops *gops)
{
}

static struct fgraph_ops fgraph_stub = {
	.entryfunc = ftrace_graph_entry_stub,
	.retfunc = ftrace_graph_ret_stub,
};

static struct fgraph_ops *fgraph_direct_gops = &fgraph_stub;
DEFINE_STATIC_CALL(fgraph_func, ftrace_graph_entry_stub);
DEFINE_STATIC_CALL(fgraph_retfunc, ftrace_graph_ret_stub);
static DEFINE_STATIC_KEY_TRUE(fgraph_do_direct);

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	static_branch_enable(&kill_ftrace_graph);
}

/* Add a function return address to the trace stack on thread info. */
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp,
			 int fgraph_idx)
{
	struct ftrace_ret_stack *ret_stack;
	unsigned long long calltime;
	unsigned long val;
	int offset;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	BUILD_BUG_ON(SHADOW_STACK_SIZE % sizeof(long));

	/* Set val to "reserved" with the delta to the new fgraph frame */
	val = (FGRAPH_TYPE_RESERVED << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/*
	 * Check if there's room on the shadow stack to fit a fgraph frame
	 * and a bitmap word.
	 */
	if (current->curr_ret_stack + FGRAPH_FRAME_OFFSET + 1 >= SHADOW_STACK_MAX_OFFSET) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	offset = READ_ONCE(current->curr_ret_stack);
	ret_stack = RET_STACK(current, offset);
	offset += FGRAPH_FRAME_OFFSET;

	/* ret offset = FGRAPH_FRAME_OFFSET ; type = reserved */
	current->ret_stack[offset] = val;
	ret_stack->ret = ret;
	/*
	 * The unwinders expect curr_ret_stack to point to either zero
	 * or an offset where to find the next ret_stack. Even though the
	 * ret stack might be bogus, we want to write the ret and the
	 * offset to find the ret_stack before we increment the stack pointer.
	 * If an interrupt comes in now before we increment the curr_ret_stack
	 * it may blow away what we wrote. But that's fine, because the
	 * offset will still be correct (even though the 'ret' won't be).
	 * What we worry about is the offset being correct after we increment
	 * the curr_ret_stack and before we update that offset, as if an
	 * interrupt comes in and does an unwind stack dump, it will need
	 * at least a correct offset!
	 */
	barrier();
	WRITE_ONCE(current->curr_ret_stack, offset + 1);
	/*
	 * This next barrier is to ensure that an interrupt coming in
	 * will not corrupt what we are about to write.
	 */
	barrier();

	/* Still keep it reserved even if an interrupt came in */
	current->ret_stack[offset] = val;

	ret_stack->ret = ret;
	ret_stack->func = func;
	ret_stack->calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	ret_stack->fp = frame_pointer;
#endif
	ret_stack->retp = retp;
	return offset;
}

/*
 * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
 * functions. But those archs currently don't support direct functions
 * anyway, and ftrace_find_rec_direct() is just a stub for them.
 * Define MCOUNT_INSN_SIZE to keep those archs compiling.
 */
#ifndef MCOUNT_INSN_SIZE
/* Make sure this only works without direct calls */
# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
#  error MCOUNT_INSN_SIZE not defined with direct calls enabled
# endif
# define MCOUNT_INSN_SIZE 0
#endif
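
/*
 * Illustrative sketch (not part of this file): function_graph_enter() below
 * is invoked from architecture entry code. A prepare_ftrace_return() style
 * helper, roughly like the ones many architectures provide, only diverts
 * the real return address when the shadow stack push succeeds:
 *
 *	void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
 *				   unsigned long frame_pointer)
 *	{
 *		unsigned long return_hooker = (unsigned long)&return_to_handler;
 *
 *		if (unlikely(ftrace_graph_is_dead()))
 *			return;
 *
 *		if (!function_graph_enter(*parent, ip, frame_pointer, parent))
 *			*parent = return_hooker;
 *	}
 *
 * Details (pausing, recursion protection, how *parent is located) differ
 * per architecture.
 */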

/* If the caller does not use ftrace, call this function. */
int function_graph_enter(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	struct ftrace_graph_ent trace;
	unsigned long bitmap = 0;
	int offset;
	int i;

	trace.func = func;
	trace.depth = ++current->curr_ret_depth;

	offset = ftrace_push_return_trace(ret, func, frame_pointer, retp, 0);
	if (offset < 0)
		goto out;

#ifdef CONFIG_HAVE_STATIC_CALL
	if (static_branch_likely(&fgraph_do_direct)) {
		int save_curr_ret_stack = current->curr_ret_stack;

		if (static_call(fgraph_func)(&trace, fgraph_direct_gops))
			bitmap |= BIT(fgraph_direct_gops->idx);
		else
			/* Clear out any saved storage */
			current->curr_ret_stack = save_curr_ret_stack;
	} else
#endif
	{
		for_each_set_bit(i, &fgraph_array_bitmask,
				 sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) {
			struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]);
			int save_curr_ret_stack;

			if (gops == &fgraph_stub)
				continue;

			save_curr_ret_stack = current->curr_ret_stack;
			if (ftrace_ops_test(&gops->ops, func, NULL) &&
			    gops->entryfunc(&trace, gops))
				bitmap |= BIT(i);
			else
				/* Clear out any saved storage */
				current->curr_ret_stack = save_curr_ret_stack;
		}
	}

	if (!bitmap)
		goto out_ret;

	/*
	 * Since this function uses fgraph_idx = 0 as a tail-call checking
	 * flag, set that bit always.
	 */
	set_bitmap(current, offset, bitmap | BIT(0));

	return 0;
out_ret:
	current->curr_ret_stack -= FGRAPH_FRAME_OFFSET + 1;
out:
	current->curr_ret_depth--;
	return -EBUSY;
}

/* Retrieve a function return address from the trace stack on thread info. */
static struct ftrace_ret_stack *
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer, int *offset)
{
	struct ftrace_ret_stack *ret_stack;

	ret_stack = get_ret_stack(current, current->curr_ret_stack, offset);

	if (unlikely(!ret_stack)) {
		ftrace_graph_stop();
		WARN(1, "Bad function graph ret_stack pointer: %d",
		     current->curr_ret_stack);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return NULL;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the placeholder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(ret_stack->fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     ret_stack->fp,
		     frame_pointer,
		     (void *)ret_stack->func,
		     ret_stack->ret);
		*ret = (unsigned long)panic;
		return NULL;
	}
#endif

	*offset += FGRAPH_FRAME_OFFSET;
	*ret = ret_stack->ret;
	trace->func = ret_stack->func;
	trace->calltime = ret_stack->calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = current->curr_ret_depth;
	/*
	 * We still want to trace interrupts coming in if
	 * max_depth is set to 1. Make sure the decrement is
	 * seen before ftrace_graph_return.
	 */
	barrier();

	return ret_stack;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
			     void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};

/* fgraph_ret_regs is not defined without CONFIG_FUNCTION_GRAPH_RETVAL */
struct fgraph_ret_regs;

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs,
						unsigned long frame_pointer)
{
	struct ftrace_ret_stack *ret_stack;
	struct ftrace_graph_ret trace;
	unsigned long bitmap;
	unsigned long ret;
	int offset;
	int i;

	ret_stack = ftrace_pop_return_trace(&trace, &ret, frame_pointer, &offset);

	if (unlikely(!ret_stack)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		return (unsigned long)panic;
	}

	trace.rettime = trace_clock_local();
#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	trace.retval = fgraph_ret_regs_return_value(ret_regs);
#endif

	bitmap = get_bitmap_bits(current, offset);

#ifdef CONFIG_HAVE_STATIC_CALL
	if (static_branch_likely(&fgraph_do_direct)) {
		if (test_bit(fgraph_direct_gops->idx, &bitmap))
			static_call(fgraph_retfunc)(&trace, fgraph_direct_gops);
	} else
#endif
	{
		for_each_set_bit(i, &bitmap, sizeof(bitmap) * BITS_PER_BYTE) {
			struct fgraph_ops *gops = fgraph_array[i];

			if (gops == &fgraph_stub)
				continue;

			gops->retfunc(&trace, gops);
		}
	}

	/*
	 * The ftrace_graph_return() may still access the current
	 * ret_stack structure; we need to make sure the update of
	 * curr_ret_stack is after that.
	 */
	barrier();
	current->curr_ret_stack = offset - FGRAPH_FRAME_OFFSET;

	current->curr_ret_depth--;
	return ret;
}
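
/*
 * Illustrative sketch (not part of this file): a retfunc invoked from the
 * loop above can pull back whatever its entryfunc reserved, pairing with
 * the my_entry() sketch in the fgraph_reserve_data() comment (struct
 * fgraph_data is hypothetical):
 *
 *	static void my_return(struct ftrace_graph_ret *trace,
 *			      struct fgraph_ops *gops)
 *	{
 *		struct fgraph_data *data;
 *		int size;
 *
 *		data = fgraph_retrieve_data(gops->idx, &size);
 *		if (data)
 *			pr_info("%ps took %llu ns\n", (void *)trace->func,
 *				trace->rettime - data->start);
 *	}
 */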

/*
 * After all architectures have selected HAVE_FUNCTION_GRAPH_RETVAL, we can
 * leave only ftrace_return_to_handler(ret_regs).
 */
#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs)
{
	return __ftrace_return_to_handler(ret_regs,
				fgraph_ret_regs_frame_pointer(ret_regs));
}
#else
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	return __ftrace_return_to_handler(NULL, frame_pointer);
}
#endif

/**
 * ftrace_graph_get_ret_stack - return the entry of the shadow stack
 * @task: The task to read the shadow stack from.
 * @idx: Index down the shadow stack
 *
 * Return the ret_struct on the shadow stack of the @task at the
 * call graph at @idx starting with zero. If @idx is zero, it
 * will return the last saved ret_stack entry. If it is greater than
 * zero, it will return the corresponding ret_stack for the depth
 * of saved return addresses.
 */
struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
{
	struct ftrace_ret_stack *ret_stack = NULL;
	int offset = task->curr_ret_stack;

	if (offset < 0)
		return NULL;

	do {
		ret_stack = get_ret_stack(task, offset, &offset);
	} while (ret_stack && --idx >= 0);

	return ret_stack;
}

/**
 * ftrace_graph_ret_addr - return the original value of the return address
 * @task: The task the unwinder is being executed on
 * @idx: An initialized pointer to the next stack index to use
 * @ret: The current return address (likely pointing to return_handler)
 * @retp: The address on the stack of the current return location
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address (@ret) to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of @ret is returned.
 *
 * @idx holds the last index used to know where to start from. It should be
 * initialized to zero for the first iteration as that will mean to start
 * at the top of the shadow stack. If the location is found, this pointer
 * will be assigned that location so that if called again, it will continue
 * where it left off.
 *
 * @retp is a pointer to the return address on the stack.
 */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	struct ftrace_ret_stack *ret_stack;
	unsigned long return_handler = (unsigned long)dereference_kernel_function_descriptor(return_to_handler);
	int i = task->curr_ret_stack;

	if (ret != return_handler)
		return ret;

	if (!idx)
		return ret;

	i = *idx ? : task->curr_ret_stack;
	while (i > 0) {
		ret_stack = get_ret_stack(task, i, &i);
		if (!ret_stack)
			break;
		/*
		 * For the tail-call, there would be 2 or more ftrace_ret_stacks on
		 * the ret_stack, which record "return_to_handler" as the return
		 * address except for the last one.
		 * But on the real stack, there should be 1 entry because tail-call
		 * reuses the return address on the stack and jumps to the next function.
		 * Thus we will continue to find the real return address.
		 */
		if (ret_stack->retp == retp &&
		    ret_stack->ret != return_handler) {
			*idx = i;
			return ret_stack->ret;
		}
	}

	return ret;
}

static struct ftrace_ops graph_ops = {
	.func			= ftrace_graph_func,
	.flags			= FTRACE_OPS_GRAPH_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
	/* trampoline_size is only needed for dynamically allocated tramps */
#endif
};

void fgraph_init_ops(struct ftrace_ops *dst_ops,
		     struct ftrace_ops *src_ops)
{
	dst_ops->flags = FTRACE_OPS_FL_PID | FTRACE_OPS_GRAPH_STUB;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (src_ops) {
		dst_ops->func_hash = &src_ops->local_hash;
		mutex_init(&dst_ops->local_hash.regex_lock);
		INIT_LIST_HEAD(&dst_ops->subop_list);
		dst_ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

void ftrace_graph_sleep_time_control(bool enable)
{
	fgraph_sleep_time = enable;
}

/*
 * Simply points to ftrace_stub, but with the proper protocol.
 * Defined by the linker script in linux/vmlinux.lds.h
 */
void ftrace_stub_graph(struct ftrace_graph_ret *trace, struct fgraph_ops *gops);

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(unsigned long **ret_stack_list)
{
	int i;
	int ret = 0;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	rcu_read_lock();
	for_each_process_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->trace_overrun, 0);
			ret_stack_init_task_vars(ret_stack_list[start]);
			t->curr_ret_stack = 0;
			t->curr_ret_depth = -1;
			/* Make sure the tasks see the 0 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	}

unlock:
	rcu_read_unlock();
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

static void
ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned int prev_state)
{
	unsigned long long timestamp;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (fgraph_sleep_time)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	next->ftrace_sleeptime += timestamp - next->ftrace_timestamp;
}

static DEFINE_PER_CPU(unsigned long *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, unsigned long *ret_stack)
{
	atomic_set(&t->trace_overrun, 0);
	ret_stack_init_task_vars(ret_stack);
	t->ftrace_timestamp = 0;
	t->curr_ret_stack = 0;
	t->curr_ret_depth = -1;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}

/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = 0;
	t->curr_ret_depth = -1;
	/*
	 * The idle task has no parent; it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		unsigned long *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}

/* Allocate a return stack for a newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = 0;
	t->curr_ret_depth = -1;

	if (ftrace_graph_active) {
		unsigned long *ret_stack;

		ret_stack = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	unsigned long *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

#ifdef CONFIG_DYNAMIC_FTRACE
static int fgraph_pid_func(struct ftrace_graph_ent *trace,
			   struct fgraph_ops *gops)
{
	struct trace_array *tr = gops->ops.private;
	int pid;

	if (tr) {
		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
		if (pid == FTRACE_PID_IGNORE)
			return 0;
		if (pid != FTRACE_PID_TRACE &&
		    pid != current->pid)
			return 0;
	}

	return gops->saved_func(trace, gops);
}

void fgraph_update_pid_func(void)
{
	struct fgraph_ops *gops;
	struct ftrace_ops *op;

	if (!(graph_ops.flags & FTRACE_OPS_FL_INITIALIZED))
		return;

	list_for_each_entry(op, &graph_ops.subop_list, list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			gops = container_of(op, struct fgraph_ops, ops);
			gops->entryfunc = ftrace_pids_enabled(op) ?
				fgraph_pid_func : gops->saved_func;
			if (ftrace_graph_active == 1)
				static_call_update(fgraph_func, gops->entryfunc);
		}
	}
}
#endif

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	unsigned long **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}

static void init_task_vars(int idx)
{
	struct task_struct *g, *t;
	int cpu;

	for_each_online_cpu(cpu) {
		if (idle_task(cpu)->ret_stack)
			ret_stack_set_task_var(idle_task(cpu), idx, 0);
	}

	read_lock(&tasklist_lock);
	for_each_process_thread(g, t) {
		if (t->ret_stack)
			ret_stack_set_task_var(t, idx, 0);
	}
	read_unlock(&tasklist_lock);
}

static void ftrace_graph_enable_direct(bool enable_branch, struct fgraph_ops *gops)
{
	trace_func_graph_ent_t func = NULL;
	trace_func_graph_ret_t retfunc = NULL;
	int i;

	if (gops) {
		func = gops->entryfunc;
		retfunc = gops->retfunc;
		fgraph_direct_gops = gops;
	} else {
		for_each_set_bit(i, &fgraph_array_bitmask,
				 sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) {
			func = fgraph_array[i]->entryfunc;
			retfunc = fgraph_array[i]->retfunc;
			fgraph_direct_gops = fgraph_array[i];
		}
	}
	if (WARN_ON_ONCE(!func))
		return;

	static_call_update(fgraph_func, func);
	static_call_update(fgraph_retfunc, retfunc);
	if (enable_branch)
		static_branch_enable(&fgraph_do_direct);
}

static void ftrace_graph_disable_direct(bool disable_branch)
{
	if (disable_branch)
		static_branch_disable(&fgraph_do_direct);
	static_call_update(fgraph_func, ftrace_graph_entry_stub);
	static_call_update(fgraph_retfunc, ftrace_graph_ret_stub);
	fgraph_direct_gops = &fgraph_stub;
}

int register_ftrace_graph(struct fgraph_ops *gops)
{
	int command = 0;
	int ret = 0;
	int i = -1;

	mutex_lock(&ftrace_lock);

	if (!fgraph_array[0]) {
		/* The array must always have real data on it */
		for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
			fgraph_array[i] = &fgraph_stub;
		fgraph_lru_init();
	}

	i = fgraph_lru_alloc_index();
	if (i < 0 || WARN_ON_ONCE(fgraph_array[i] != &fgraph_stub)) {
		ret = -ENOSPC;
		goto out;
	}
	gops->idx = i;

	ftrace_graph_active++;

	if (ftrace_graph_active == 2)
		ftrace_graph_disable_direct(true);

	if (ftrace_graph_active == 1) {
		ftrace_graph_enable_direct(false, gops);
		register_pm_notifier(&ftrace_suspend_notifier);
		ret = start_graph_tracing();
		if (ret)
			goto error;
		/*
		 * Some archs just test to see if these are not
		 * the default function
		 */
		ftrace_graph_return = return_run;
		ftrace_graph_entry = entry_run;
		command = FTRACE_START_FUNC_RET;
	} else {
		init_task_vars(gops->idx);
	}
	/* Always save the function, and reset at unregistering */
	gops->saved_func = gops->entryfunc;

	ret = ftrace_startup_subops(&graph_ops, &gops->ops, command);
	if (!ret)
		fgraph_array[i] = gops;

error:
	if (ret) {
		ftrace_graph_active--;
		gops->saved_func = NULL;
		fgraph_lru_release_index(i);
	}
out:
	mutex_unlock(&ftrace_lock);
	return ret;
}

void unregister_ftrace_graph(struct fgraph_ops *gops)
{
	int command = 0;

	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	if (unlikely(gops->idx < 0 || gops->idx >= FGRAPH_ARRAY_SIZE ||
		     fgraph_array[gops->idx] != gops))
		goto out;

	if (fgraph_lru_release_index(gops->idx) < 0)
		goto out;

	fgraph_array[gops->idx] = &fgraph_stub;

	ftrace_graph_active--;

	if (!ftrace_graph_active)
		command = FTRACE_STOP_FUNC_RET;

	ftrace_shutdown_subops(&graph_ops, &gops->ops, command);

	if (ftrace_graph_active == 1)
		ftrace_graph_enable_direct(true, NULL);
	else if (!ftrace_graph_active)
		ftrace_graph_disable_direct(false);

	if (!ftrace_graph_active) {
		ftrace_graph_return = ftrace_stub_graph;
		ftrace_graph_entry = ftrace_graph_entry_stub;
		unregister_pm_notifier(&ftrace_suspend_notifier);
		unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
	}
out:
	gops->saved_func = NULL;
	mutex_unlock(&ftrace_lock);
}
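
/*
 * Illustrative sketch (not part of this file): a minimal user of this API.
 * The my_entry()/my_return() callbacks are the hypothetical sketches from
 * the comments above; error handling and filtering are elided.
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	static int __init my_fgraph_init(void)
 *	{
 *		// Optionally narrow the traced functions first, e.g. with
 *		// ftrace_set_filter() on &my_gops.ops.
 *		return register_ftrace_graph(&my_gops);
 *	}
 *
 *	static void __exit my_fgraph_exit(void)
 *	{
 *		unregister_ftrace_graph(&my_gops);
 *	}
 */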