/*
 * Kernel Debugger Architecture Independent Support Functions
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1999-2004 Silicon Graphics, Inc.  All Rights Reserved.
 * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
 * 03/02/13    added new 2.5 kallsyms <[email protected]>
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kallsyms.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/ptrace.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/kdb.h>
#include <linux/slab.h>
#include "kdb_private.h"

/*
 * kdbgetsymval - Return the address of the given symbol.
 *
 * Parameters:
 *	symname	Character string containing symbol name
 *	symtab	Structure to receive results
 * Returns:
 *	0	Symbol not found, symtab zero filled
 *	1	Symbol mapped to module/symbol/section, data in symtab
 */
int kdbgetsymval(const char *symname, kdb_symtab_t *symtab)
{
	kdb_dbg_printf(AR, "symname=%s, symtab=%px\n", symname, symtab);
	memset(symtab, 0, sizeof(*symtab));
	symtab->sym_start = kallsyms_lookup_name(symname);
	if (symtab->sym_start) {
		kdb_dbg_printf(AR, "returns 1, symtab->sym_start=0x%lx\n",
			       symtab->sym_start);
		return 1;
	}
	kdb_dbg_printf(AR, "returns 0\n");
	return 0;
}
EXPORT_SYMBOL(kdbgetsymval);
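/*
 * A minimal usage sketch (illustrative only, not a caller that exists in
 * this file): resolving a name typed at the kdb prompt.  "schedule" is just
 * an example symbol.
 *
 *	kdb_symtab_t symtab;
 *
 *	if (kdbgetsymval("schedule", &symtab))
 *		kdb_printf("schedule is at 0x%lx\n", symtab.sym_start);
 *	else
 *		kdb_printf("symbol not found\n");
 */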
static char *kdb_name_table[100];	/* arbitrary size */

/*
 * kdbnearsym - Return the name of the symbol with the nearest address
 *	less than 'addr'.
 *
 * Parameters:
 *	addr	Address to check for symbol near
 *	symtab	Structure to receive results
 * Returns:
 *	0	No sections contain this address, symtab zero filled
 *	1	Address mapped to module/symbol/section, data in symtab
 * Remarks:
 *	2.6 kallsyms has a "feature" where it unpacks the name into a
 *	string.  If that string is reused before the caller expects it
 *	then the caller sees its string change without warning.  To
 *	avoid cluttering up the main kdb code with lots of kdb_strdup,
 *	tests and kfree calls, kdbnearsym maintains an LRU list of the
 *	last few unique strings.  The list is sized large enough to
 *	hold active strings, no kdb caller of kdbnearsym makes more
 *	than ~20 later calls before using a saved value.
 */
int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
{
	int ret = 0;
	unsigned long symbolsize = 0;
	unsigned long offset = 0;
#define knt1_size 128		/* must be >= kallsyms table size */
	char *knt1 = NULL;

	kdb_dbg_printf(AR, "addr=0x%lx, symtab=%px\n", addr, symtab);
	memset(symtab, 0, sizeof(*symtab));

	if (addr < 4096)
		goto out;
	knt1 = debug_kmalloc(knt1_size, GFP_ATOMIC);
	if (!knt1) {
		kdb_func_printf("addr=0x%lx cannot kmalloc knt1\n", addr);
		goto out;
	}
	symtab->sym_name = kallsyms_lookup(addr, &symbolsize, &offset,
					   (char **)(&symtab->mod_name), knt1);
	if (offset > 8*1024*1024) {
		symtab->sym_name = NULL;
		addr = offset = symbolsize = 0;
	}
	symtab->sym_start = addr - offset;
	symtab->sym_end = symtab->sym_start + symbolsize;
	ret = symtab->sym_name != NULL && *(symtab->sym_name) != '\0';

	if (ret) {
		int i;
		/* Another 2.6 kallsyms "feature".  Sometimes the sym_name is
		 * set but the buffer passed into kallsyms_lookup is not used,
		 * so it contains garbage.  The caller has to work out which
		 * buffer needs to be saved.
		 *
		 * What was Rusty smoking when he wrote that code?
		 */
		if (symtab->sym_name != knt1) {
			strncpy(knt1, symtab->sym_name, knt1_size);
			knt1[knt1_size-1] = '\0';
		}
		for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) {
			if (kdb_name_table[i] &&
			    strcmp(kdb_name_table[i], knt1) == 0)
				break;
		}
		if (i >= ARRAY_SIZE(kdb_name_table)) {
			debug_kfree(kdb_name_table[0]);
			memmove(kdb_name_table, kdb_name_table+1,
				sizeof(kdb_name_table[0]) *
				(ARRAY_SIZE(kdb_name_table)-1));
		} else {
			debug_kfree(knt1);
			knt1 = kdb_name_table[i];
			memmove(kdb_name_table+i, kdb_name_table+i+1,
				sizeof(kdb_name_table[0]) *
				(ARRAY_SIZE(kdb_name_table)-i-1));
		}
		i = ARRAY_SIZE(kdb_name_table) - 1;
		kdb_name_table[i] = knt1;
		symtab->sym_name = kdb_name_table[i];
		knt1 = NULL;
	}

	if (symtab->mod_name == NULL)
		symtab->mod_name = "kernel";
	kdb_dbg_printf(AR, "returns %d symtab->sym_start=0x%lx, symtab->mod_name=%px, symtab->sym_name=%px (%s)\n",
		       ret, symtab->sym_start, symtab->mod_name,
		       symtab->sym_name, symtab->sym_name);

out:
	debug_kfree(knt1);
	return ret;
}

void kdbnearsym_cleanup(void)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) {
		if (kdb_name_table[i]) {
			debug_kfree(kdb_name_table[i]);
			kdb_name_table[i] = NULL;
		}
	}
}
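/*
 * A minimal usage sketch (illustrative only): turning a code address back
 * into "[module]symbol+offset" form, e.g. for a backtrace line.  'addr' is
 * assumed to hold a text address the caller obtained elsewhere.
 *
 *	kdb_symtab_t symtab;
 *
 *	if (kdbnearsym(addr, &symtab))
 *		kdb_printf("[%s]%s+0x%lx\n", symtab.mod_name,
 *			   symtab.sym_name, addr - symtab.sym_start);
 */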
static char ks_namebuf[KSYM_NAME_LEN+1], ks_namebuf_prev[KSYM_NAME_LEN+1];

/*
 * kallsyms_symbol_complete
 *
 * Parameters:
 *	prefix_name	prefix of a symbol name to lookup
 *	max_len		maximum length that can be returned
 * Returns:
 *	Number of symbols which match the given prefix.
 * Notes:
 *	prefix_name is changed to contain the longest unique prefix that
 *	starts with this prefix (tab completion).
 */
int kallsyms_symbol_complete(char *prefix_name, int max_len)
{
	loff_t pos = 0;
	int prefix_len = strlen(prefix_name), prev_len = 0;
	int i, number = 0;
	const char *name;

	while ((name = kdb_walk_kallsyms(&pos))) {
		if (strncmp(name, prefix_name, prefix_len) == 0) {
			strscpy(ks_namebuf, name, sizeof(ks_namebuf));
			/* Work out the longest name that matches the prefix */
			if (++number == 1) {
				prev_len = min_t(int, max_len-1,
						 strlen(ks_namebuf));
				memcpy(ks_namebuf_prev, ks_namebuf, prev_len);
				ks_namebuf_prev[prev_len] = '\0';
				continue;
			}
			for (i = 0; i < prev_len; i++) {
				if (ks_namebuf[i] != ks_namebuf_prev[i]) {
					prev_len = i;
					ks_namebuf_prev[i] = '\0';
					break;
				}
			}
		}
	}
	if (prev_len > prefix_len)
		memcpy(prefix_name, ks_namebuf_prev, prev_len+1);
	return number;
}

/*
 * kallsyms_symbol_next
 *
 * Parameters:
 *	prefix_name	prefix of a symbol name to lookup
 *	flag		0 means search from the head, 1 means continue search.
 *	buf_size	maximum length that can be written to prefix_name
 *			buffer
 * Returns:
 *	Non-zero (the strscpy() result for the matched name, which replaces
 *	prefix_name) if a symbol matches the given prefix.
 *	0 if no string found.
 */
int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size)
{
	int prefix_len = strlen(prefix_name);
	static loff_t pos;
	const char *name;

	if (!flag)
		pos = 0;

	while ((name = kdb_walk_kallsyms(&pos))) {
		if (!strncmp(name, prefix_name, prefix_len))
			return strscpy(prefix_name, name, buf_size);
	}
	return 0;
}
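/*
 * A minimal usage sketch (illustrative only): tab completion over a prefix.
 * The buffer size is an arbitrary example value; kdb's line editor in
 * kdb_io.c is the real caller of these two functions.
 *
 *	char buf[64] = "kdbnear";
 *	int matches = kallsyms_symbol_complete(buf, sizeof(buf));
 *
 *	kdb_printf("%d matches, longest common prefix '%s'\n", matches, buf);
 *	if (matches == 1 && kallsyms_symbol_next(buf, 0, sizeof(buf)) > 0)
 *		kdb_printf("completed to '%s'\n", buf);
 */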
/*
 * kdb_symbol_print - Standard method for printing a symbol name and offset.
 * Inputs:
 *	addr	Address to be printed.
 *	symtab	Address of symbol data, if NULL this routine does its
 *		own lookup.
 *	punc	Punctuation for string, bit field.
 * Remarks:
 *	The string and its punctuation is only printed if the address
 *	is inside the kernel, except that the value is always printed
 *	when requested.
 */
void kdb_symbol_print(unsigned long addr, const kdb_symtab_t *symtab_p,
		      unsigned int punc)
{
	kdb_symtab_t symtab, *symtab_p2;
	if (symtab_p) {
		symtab_p2 = (kdb_symtab_t *)symtab_p;
	} else {
		symtab_p2 = &symtab;
		kdbnearsym(addr, symtab_p2);
	}
	if (!(symtab_p2->sym_name || (punc & KDB_SP_VALUE)))
		return;
	if (punc & KDB_SP_SPACEB)
		kdb_printf(" ");
	if (punc & KDB_SP_VALUE)
		kdb_printf(kdb_machreg_fmt0, addr);
	if (symtab_p2->sym_name) {
		if (punc & KDB_SP_VALUE)
			kdb_printf(" ");
		if (punc & KDB_SP_PAREN)
			kdb_printf("(");
		if (strcmp(symtab_p2->mod_name, "kernel"))
			kdb_printf("[%s]", symtab_p2->mod_name);
		kdb_printf("%s", symtab_p2->sym_name);
		if (addr != symtab_p2->sym_start)
			kdb_printf("+0x%lx", addr - symtab_p2->sym_start);
		if (punc & KDB_SP_SYMSIZE)
			kdb_printf("/0x%lx",
				   symtab_p2->sym_end - symtab_p2->sym_start);
		if (punc & KDB_SP_PAREN)
			kdb_printf(")");
	}
	if (punc & KDB_SP_SPACEA)
		kdb_printf(" ");
	if (punc & KDB_SP_NEWLINE)
		kdb_printf("\n");
}

/*
 * kdb_strdup - kdb equivalent of strdup, for disasm code.
 * Inputs:
 *	str	The string to duplicate.
 *	type	Flags to kmalloc for the new string.
 * Returns:
 *	Address of the new string, NULL if storage could not be allocated.
 * Remarks:
 *	This is not in lib/string.c because it uses kmalloc which is not
 *	available when string.o is used in boot loaders.
 */
char *kdb_strdup(const char *str, gfp_t type)
{
	int n = strlen(str)+1;
	char *s = kmalloc(n, type);
	if (!s)
		return NULL;
	return strcpy(s, str);
}

/*
 * kdb_getarea_size - Read an area of data.  The kdb equivalent of
 *	copy_from_user, with kdb messages for invalid addresses.
 * Inputs:
 *	res	Pointer to the area to receive the result.
 *	addr	Address of the area to copy.
 *	size	Size of the area.
 * Returns:
 *	0 for success, < 0 for error.
 */
int kdb_getarea_size(void *res, unsigned long addr, size_t size)
{
	int ret = copy_from_kernel_nofault((char *)res, (char *)addr, size);
	if (ret) {
		if (!KDB_STATE(SUPPRESS)) {
			kdb_func_printf("Bad address 0x%lx\n", addr);
			KDB_STATE_SET(SUPPRESS);
		}
		ret = KDB_BADADDR;
	} else {
		KDB_STATE_CLEAR(SUPPRESS);
	}
	return ret;
}

/*
 * kdb_putarea_size - Write an area of data.  The kdb equivalent of
 *	copy_to_user, with kdb messages for invalid addresses.
 * Inputs:
 *	addr	Address of the area to write to.
 *	res	Pointer to the area holding the data.
 *	size	Size of the area.
 * Returns:
 *	0 for success, < 0 for error.
 */
int kdb_putarea_size(unsigned long addr, void *res, size_t size)
{
	/* This writes to a kernel address, so it must use the _to_ variant,
	 * not copy_from_kernel_nofault.
	 */
	int ret = copy_to_kernel_nofault((char *)addr, (char *)res, size);
	if (ret) {
		if (!KDB_STATE(SUPPRESS)) {
			kdb_func_printf("Bad address 0x%lx\n", addr);
			KDB_STATE_SET(SUPPRESS);
		}
		ret = KDB_BADADDR;
	} else {
		KDB_STATE_CLEAR(SUPPRESS);
	}
	return ret;
}
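/*
 * A minimal usage sketch (illustrative only): safely fetch a function
 * pointer and print it as "value symbol+offset".  kdb_getarea() is the
 * sizeof-deducing wrapper around kdb_getarea_size() from kdb_private.h.
 *
 *	unsigned long func;
 *
 *	if (kdb_getarea(func, addr) == 0)
 *		kdb_symbol_print(func, NULL, KDB_SP_VALUE | KDB_SP_NEWLINE);
 */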
/*
 * kdb_getphys - Read data from a physical address.  Validate the
 *	address is in range, use kmap_atomic() to get data
 *	similar to kdb_getarea() - but for phys addresses
 * Inputs:
 *	res	Pointer to the word to receive the result
 *	addr	Physical address of the area to copy
 *	size	Size of the area
 * Returns:
 *	0 for success, non-zero if the pfn is not valid.
 */
static int kdb_getphys(void *res, unsigned long addr, size_t size)
{
	unsigned long pfn;
	void *vaddr;
	struct page *page;

	pfn = (addr >> PAGE_SHIFT);
	if (!pfn_valid(pfn))
		return 1;
	page = pfn_to_page(pfn);
	vaddr = kmap_atomic(page);
	memcpy(res, vaddr + (addr & (PAGE_SIZE - 1)), size);
	kunmap_atomic(vaddr);

	return 0;
}

/*
 * kdb_getphysword
 * Inputs:
 *	word	Pointer to the word to receive the result.
 *	addr	Address of the area to copy.
 *	size	Size of the area.
 * Returns:
 *	0 for success, < 0 for error.
 */
int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size)
{
	int diag;
	__u8 w1;
	__u16 w2;
	__u32 w4;
	__u64 w8;
	*word = 0;	/* Default value if addr or size is invalid */

	switch (size) {
	case 1:
		diag = kdb_getphys(&w1, addr, sizeof(w1));
		if (!diag)
			*word = w1;
		break;
	case 2:
		diag = kdb_getphys(&w2, addr, sizeof(w2));
		if (!diag)
			*word = w2;
		break;
	case 4:
		diag = kdb_getphys(&w4, addr, sizeof(w4));
		if (!diag)
			*word = w4;
		break;
	case 8:
		if (size <= sizeof(*word)) {
			diag = kdb_getphys(&w8, addr, sizeof(w8));
			if (!diag)
				*word = w8;
			break;
		}
		fallthrough;
	default:
		diag = KDB_BADWIDTH;
		kdb_func_printf("bad width %zu\n", size);
	}
	return diag;
}

/*
 * kdb_getword - Read a binary value.  Unlike kdb_getarea, this treats
 *	data as numbers.
 * Inputs:
 *	word	Pointer to the word to receive the result.
 *	addr	Address of the area to copy.
 *	size	Size of the area.
 * Returns:
 *	0 for success, < 0 for error.
 */
int kdb_getword(unsigned long *word, unsigned long addr, size_t size)
{
	int diag;
	__u8 w1;
	__u16 w2;
	__u32 w4;
	__u64 w8;
	*word = 0;	/* Default value if addr or size is invalid */
	switch (size) {
	case 1:
		diag = kdb_getarea(w1, addr);
		if (!diag)
			*word = w1;
		break;
	case 2:
		diag = kdb_getarea(w2, addr);
		if (!diag)
			*word = w2;
		break;
	case 4:
		diag = kdb_getarea(w4, addr);
		if (!diag)
			*word = w4;
		break;
	case 8:
		if (size <= sizeof(*word)) {
			diag = kdb_getarea(w8, addr);
			if (!diag)
				*word = w8;
			break;
		}
		fallthrough;
	default:
		diag = KDB_BADWIDTH;
		kdb_func_printf("bad width %zu\n", size);
	}
	return diag;
}

/*
 * kdb_putword - Write a binary value.  Unlike kdb_putarea, this
 *	treats data as numbers.
 * Inputs:
 *	addr	Address of the area to write to.
 *	word	The value to set.
 *	size	Size of the area.
 * Returns:
 *	0 for success, < 0 for error.
 */
int kdb_putword(unsigned long addr, unsigned long word, size_t size)
{
	int diag;
	__u8 w1;
	__u16 w2;
	__u32 w4;
	__u64 w8;
	switch (size) {
	case 1:
		w1 = word;
		diag = kdb_putarea(addr, w1);
		break;
	case 2:
		w2 = word;
		diag = kdb_putarea(addr, w2);
		break;
	case 4:
		w4 = word;
		diag = kdb_putarea(addr, w4);
		break;
	case 8:
		if (size <= sizeof(word)) {
			w8 = word;
			diag = kdb_putarea(addr, w8);
			break;
		}
		fallthrough;
	default:
		diag = KDB_BADWIDTH;
		kdb_func_printf("bad width %zu\n", size);
	}
	return diag;
}
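/*
 * A minimal usage sketch (illustrative only): read a 4 byte word, set a bit
 * and write it back - the core of a memory-poke style command.  'addr' is
 * assumed valid; both calls return 0 on success.
 *
 *	unsigned long word;
 *
 *	if (kdb_getword(&word, addr, 4) == 0)
 *		kdb_putword(addr, word | 0x1, 4);
 */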
/*
 * kdb_task_state_string - Convert a string containing any of the
 *	letters DRSTCZEUIMA to a mask for the process state field and
 *	return the value.  If no argument is supplied, return the mask
 *	that corresponds to environment variable PS, DRSTCZEU by
 *	default.
 * Inputs:
 *	s	String to convert
 * Returns:
 *	Mask for process state.
 * Notes:
 *	The mask folds data from several sources into a single long value, so
 *	be careful not to overlap the bits.  TASK_* bits are in the LSB,
 *	special cases like UNRUNNABLE are in the MSB.  As of 2.6.10-rc1 there
 *	is no overlap between TASK_* and EXIT_* but that may not always be
 *	true, so EXIT_* bits are shifted left 16 bits before being stored in
 *	the mask.
 */

/* unrunnable is < 0 */
#define UNRUNNABLE	(1UL << (8*sizeof(unsigned long) - 1))
#define RUNNING		(1UL << (8*sizeof(unsigned long) - 2))
#define IDLE		(1UL << (8*sizeof(unsigned long) - 3))
#define DAEMON		(1UL << (8*sizeof(unsigned long) - 4))

unsigned long kdb_task_state_string(const char *s)
{
	long res = 0;
	if (!s) {
		s = kdbgetenv("PS");
		if (!s)
			s = "DRSTCZEU";	/* default value for ps */
	}
	while (*s) {
		switch (*s) {
		case 'D':
			res |= TASK_UNINTERRUPTIBLE;
			break;
		case 'R':
			res |= RUNNING;
			break;
		case 'S':
			res |= TASK_INTERRUPTIBLE;
			break;
		case 'T':
			res |= TASK_STOPPED;
			break;
		case 'C':
			res |= TASK_TRACED;
			break;
		case 'Z':
			res |= EXIT_ZOMBIE << 16;
			break;
		case 'E':
			res |= EXIT_DEAD << 16;
			break;
		case 'U':
			res |= UNRUNNABLE;
			break;
		case 'I':
			res |= IDLE;
			break;
		case 'M':
			res |= DAEMON;
			break;
		case 'A':
			res = ~0UL;
			break;
		default:
			kdb_func_printf("unknown flag '%c' ignored\n", *s);
			break;
		}
		++s;
	}
	return res;
}

/*
 * kdb_task_state_char - Return the character that represents the task state.
 * Inputs:
 *	p	struct task for the process
 * Returns:
 *	One character to represent the task state.
 */
char kdb_task_state_char(const struct task_struct *p)
{
	unsigned int p_state;
	unsigned long tmp;
	char state;
	int cpu;

	if (!p ||
	    copy_from_kernel_nofault(&tmp, (char *)p, sizeof(unsigned long)))
		return 'E';

	cpu = kdb_process_cpu(p);
	p_state = READ_ONCE(p->__state);
	state = (p_state == 0) ? 'R' :
		(p_state < 0) ? 'U' :
		(p_state & TASK_UNINTERRUPTIBLE) ? 'D' :
		(p_state & TASK_STOPPED) ? 'T' :
		(p_state & TASK_TRACED) ? 'C' :
		(p->exit_state & EXIT_ZOMBIE) ? 'Z' :
		(p->exit_state & EXIT_DEAD) ? 'E' :
		(p_state & TASK_INTERRUPTIBLE) ? 'S' : '?';
	if (is_idle_task(p)) {
		/* Idle task.  Is it really idle, apart from the kdb
		 * interrupt? */
		if (!kdb_task_has_cpu(p) || kgdb_info[cpu].irq_depth == 1) {
			if (cpu != kdb_initial_cpu)
				state = 'I';	/* idle task */
		}
	} else if (!p->mm && state == 'S') {
		state = 'M';	/* sleeping system daemon */
	}
	return state;
}

/*
 * kdb_task_state - Return true if a process has the desired state
 *	given by the mask.
 * Inputs:
 *	p	struct task for the process
 *	mask	mask from kdb_task_state_string to select processes
 * Returns:
 *	True if the process matches at least one criteria defined by the mask.
 */
unsigned long kdb_task_state(const struct task_struct *p, unsigned long mask)
{
	char state[] = { kdb_task_state_char(p), '\0' };
	return (mask & kdb_task_state_string(state)) != 0;
}
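/*
 * A minimal usage sketch (illustrative only): the filtering loop at the
 * heart of a ps-style command.  kdb's real ps command in kdb_main.c
 * combines these three helpers in essentially this way.
 *
 *	unsigned long mask = kdb_task_state_string("DRS");
 *	struct task_struct *p;
 *
 *	for_each_process(p) {
 *		if (kdb_task_state(p, mask))
 *			kdb_printf("%d %c %s\n", p->pid,
 *				   kdb_task_state_char(p), p->comm);
 *	}
 */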
/* Last ditch allocator for debugging, so we can still debug even when
 * the GFP_ATOMIC pool has been exhausted.  The algorithms are tuned
 * for space usage, not for speed.  One smallish memory pool, the free
 * chain is always in ascending address order to allow coalescing,
 * allocations are done in brute force best fit.
 */

struct debug_alloc_header {
	u32 next;	/* offset of next header from start of pool */
	u32 size;
	void *caller;
};

/* The memory returned by this allocator must be aligned, which means
 * so must the header size.  Do not assume that sizeof(struct
 * debug_alloc_header) is a multiple of the alignment, explicitly
 * calculate the overhead of this header, including the alignment.
 * The rest of this code must not use sizeof() on any header or
 * pointer to a header.
 */
#define dah_align 8
#define dah_overhead ALIGN(sizeof(struct debug_alloc_header), dah_align)

static u64 debug_alloc_pool_aligned[256*1024/dah_align];	/* 256K pool */
static char *debug_alloc_pool = (char *)debug_alloc_pool_aligned;
static u32 dah_first, dah_first_call = 1, dah_used, dah_used_max;

/* Locking is awkward.  The debug code is called from all contexts,
 * including non maskable interrupts.  A normal spinlock is not safe
 * in NMI context.  Try to get the debug allocator lock, if it cannot
 * be obtained after a second then give up.  If the lock could not be
 * previously obtained on this cpu then only try once.
 *
 * sparse has no annotation for "this function _sometimes_ acquires a
 * lock", so fudge the acquire/release notation.
 */
static DEFINE_SPINLOCK(dap_lock);
static int get_dap_lock(void)
	__acquires(dap_lock)
{
	static int dap_locked = -1;
	int count;
	if (dap_locked == smp_processor_id())
		count = 1;
	else
		count = 1000;
	while (1) {
		if (spin_trylock(&dap_lock)) {
			dap_locked = -1;
			return 1;
		}
		if (!count--)
			break;
		udelay(1000);
	}
	dap_locked = smp_processor_id();
	__acquire(dap_lock);
	return 0;
}

void *debug_kmalloc(size_t size, gfp_t flags)
{
	unsigned int rem, h_offset;
	struct debug_alloc_header *best, *bestprev, *prev, *h;
	void *p = NULL;
	if (!get_dap_lock()) {
		__release(dap_lock);	/* we never actually got it */
		return NULL;
	}
	h = (struct debug_alloc_header *)(debug_alloc_pool + dah_first);
	if (dah_first_call) {
		h->size = sizeof(debug_alloc_pool_aligned) - dah_overhead;
		dah_first_call = 0;
	}
	size = ALIGN(size, dah_align);
	prev = best = bestprev = NULL;
	while (1) {
		if (h->size >= size && (!best || h->size < best->size)) {
			best = h;
			bestprev = prev;
			if (h->size == size)
				break;
		}
		if (!h->next)
			break;
		prev = h;
		h = (struct debug_alloc_header *)(debug_alloc_pool + h->next);
	}
	if (!best)
		goto out;
	rem = best->size - size;
	/* The pool must always contain at least one header */
	if (best->next == 0 && bestprev == NULL && rem < dah_overhead)
		goto out;
	if (rem >= dah_overhead) {
		best->size = size;
		h_offset = ((char *)best - debug_alloc_pool) +
			   dah_overhead + best->size;
		h = (struct debug_alloc_header *)(debug_alloc_pool + h_offset);
		h->size = rem - dah_overhead;
		h->next = best->next;
	} else
		h_offset = best->next;
	best->caller = __builtin_return_address(0);
	dah_used += best->size;
	dah_used_max = max(dah_used, dah_used_max);
	if (bestprev)
		bestprev->next = h_offset;
	else
		dah_first = h_offset;
	p = (char *)best + dah_overhead;
	memset(p, POISON_INUSE, best->size - 1);
	*((char *)p + best->size - 1) = POISON_END;
out:
	spin_unlock(&dap_lock);
	return p;
}
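/*
 * A minimal usage sketch (illustrative only): debug_kmalloc()/debug_kfree()
 * are drop-in replacements for kmalloc()/kfree() inside the debugger; they
 * carve from the static pool above, so they keep working when GFP_ATOMIC
 * is exhausted (see kdbnearsym() above for a real caller).
 *
 *	char *buf = debug_kmalloc(128, GFP_ATOMIC);
 *
 *	if (buf) {
 *		... use buf while kdb has the machine stopped ...
 *		debug_kfree(buf);
 *	}
 */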
void debug_kfree(void *p)
{
	struct debug_alloc_header *h;
	unsigned int h_offset;
	if (!p)
		return;
	if ((char *)p < debug_alloc_pool ||
	    (char *)p >= debug_alloc_pool + sizeof(debug_alloc_pool_aligned)) {
		kfree(p);
		return;
	}
	if (!get_dap_lock()) {
		__release(dap_lock);	/* we never actually got it */
		return;			/* memory leak, cannot be helped */
	}
	h = (struct debug_alloc_header *)((char *)p - dah_overhead);
	memset(p, POISON_FREE, h->size - 1);
	*((char *)p + h->size - 1) = POISON_END;
	h->caller = NULL;
	dah_used -= h->size;
	h_offset = (char *)h - debug_alloc_pool;
	if (h_offset < dah_first) {
		h->next = dah_first;
		dah_first = h_offset;
	} else {
		struct debug_alloc_header *prev;
		unsigned int prev_offset;
		prev = (struct debug_alloc_header *)(debug_alloc_pool +
						     dah_first);
		while (1) {
			if (!prev->next || prev->next > h_offset)
				break;
			prev = (struct debug_alloc_header *)
			       (debug_alloc_pool + prev->next);
		}
		prev_offset = (char *)prev - debug_alloc_pool;
		if (prev_offset + dah_overhead + prev->size == h_offset) {
			prev->size += dah_overhead + h->size;
			memset(h, POISON_FREE, dah_overhead - 1);
			*((char *)h + dah_overhead - 1) = POISON_END;
			h = prev;
			h_offset = prev_offset;
		} else {
			h->next = prev->next;
			prev->next = h_offset;
		}
	}
	if (h_offset + dah_overhead + h->size == h->next) {
		struct debug_alloc_header *next;
		next = (struct debug_alloc_header *)
		       (debug_alloc_pool + h->next);
		h->size += dah_overhead + next->size;
		h->next = next->next;
		memset(next, POISON_FREE, dah_overhead - 1);
		*((char *)next + dah_overhead - 1) = POISON_END;
	}
	spin_unlock(&dap_lock);
}

void debug_kusage(void)
{
	struct debug_alloc_header *h_free, *h_used;
#ifdef CONFIG_IA64
	/* FIXME: using dah for ia64 unwind always results in a memory leak.
	 * Fix that memory leak first, then set debug_kusage_one_time = 1 for
	 * all architectures.
	 */
	static int debug_kusage_one_time;
#else
	static int debug_kusage_one_time = 1;
#endif
	if (!get_dap_lock()) {
		__release(dap_lock);	/* we never actually got it */
		return;
	}
	h_free = (struct debug_alloc_header *)(debug_alloc_pool + dah_first);
	if (dah_first == 0 &&
	    (h_free->size == sizeof(debug_alloc_pool_aligned) - dah_overhead ||
	     dah_first_call))
		goto out;
	if (!debug_kusage_one_time)
		goto out;
	debug_kusage_one_time = 0;
	kdb_func_printf("debug_kmalloc memory leak dah_first %d\n", dah_first);
	if (dah_first) {
		h_used = (struct debug_alloc_header *)debug_alloc_pool;
		kdb_func_printf("h_used %px size %d\n", h_used, h_used->size);
	}
	do {
		h_used = (struct debug_alloc_header *)
			 ((char *)h_free + dah_overhead + h_free->size);
		kdb_func_printf("h_used %px size %d caller %px\n",
				h_used, h_used->size, h_used->caller);
		h_free = (struct debug_alloc_header *)
			 (debug_alloc_pool + h_free->next);
	} while (h_free->next);
	h_used = (struct debug_alloc_header *)
		 ((char *)h_free + dah_overhead + h_free->size);
	if ((char *)h_used - debug_alloc_pool !=
	    sizeof(debug_alloc_pool_aligned))
		kdb_func_printf("h_used %px size %d caller %px\n",
				h_used, h_used->size, h_used->caller);
out:
	spin_unlock(&dap_lock);
}

/* Maintain a small stack of kdb_flags to allow recursion without disturbing
 * the global kdb state.
 */

static int kdb_flags_stack[4], kdb_flags_index;

void kdb_save_flags(void)
{
	BUG_ON(kdb_flags_index >= ARRAY_SIZE(kdb_flags_stack));
	kdb_flags_stack[kdb_flags_index++] = kdb_flags;
}

void kdb_restore_flags(void)
{
	BUG_ON(kdb_flags_index <= 0);
	kdb_flags = kdb_flags_stack[--kdb_flags_index];
}
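/*
 * A minimal usage sketch (illustrative only): bracketing a recursive kdb
 * entry so flag changes made inside do not leak into the outer session.
 * The stack is only four entries deep, so save/restore pairs must balance.
 *
 *	kdb_save_flags();
 *	KDB_FLAG_SET(CMD_INTERRUPT);	... run the nested command ...
 *	kdb_restore_flags();
 */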