//===-- dfsan.cpp ---------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of DataFlowSanitizer.
//
// DataFlowSanitizer runtime. This file defines the public interface to
// DataFlowSanitizer as well as the definition of certain runtime functions
// called automatically by the compiler (specifically the instrumentation pass
// in llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp).
//
// The public interface is defined in include/sanitizer/dfsan_interface.h whose
// functions are prefixed dfsan_ while the compiler interface functions are
// prefixed __dfsan_.
//===----------------------------------------------------------------------===//

#include "dfsan/dfsan.h"

#include "dfsan/dfsan_chained_origin_depot.h"
#include "dfsan/dfsan_flags.h"
#include "dfsan/dfsan_origin.h"
#include "dfsan/dfsan_thread.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stacktrace.h"

using namespace __dfsan;

// Atomic counterpart of the 16-bit dfsan_label, used for lock-free updates of
// the union table and the label counter.
typedef atomic_uint16_t atomic_dfsan_label;
// Sentinel (0xffff, since dfsan_label is unsigned 16-bit) marking a
// union-table entry that some thread is currently initializing.  It doubles as
// the "out of labels" limit checked by dfsan_check_label.
static const dfsan_label kInitializingLabel = -1;

// Total number of representable labels: 2^16 for the 16-bit dfsan_label.
static const uptr kNumLabels = 1 << (sizeof(dfsan_label) * 8);

// Highest label allocated so far.  Label 0 means "untainted" and is never
// allocated.
static atomic_dfsan_label __dfsan_last_label;
// Per-label metadata (union operands l1/l2, and for base labels the
// user-provided desc/userdata), indexed by label value.
static dfsan_label_info __dfsan_label_info[kNumLabels];
Flags __dfsan::flags_data;

// The size of TLS variables. These constants must be kept in sync with the ones
// in DataFlowSanitizer.cpp.
static const int kDFsanArgTlsSize = 800;
static const int kDFsanRetvalTlsSize = 800;
static const int kDFsanArgOriginTlsSize = 800;

// Thread-local slots through which instrumented code passes shadow labels
// (and, when enabled, origins) for function arguments and return values.
SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL u64
    __dfsan_retval_tls[kDFsanRetvalTlsSize / sizeof(u64)];
SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL u32 __dfsan_retval_origin_tls;
SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL u64
    __dfsan_arg_tls[kDFsanArgTlsSize / sizeof(u64)];
SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL u32
    __dfsan_arg_origin_tls[kDFsanArgOriginTlsSize / sizeof(u32)];

// Mask applied to application addresses to derive shadow addresses; filled in
// by InitializePlatformEarly on platforms with a runtime-detected VMA size.
SANITIZER_INTERFACE_ATTRIBUTE uptr __dfsan_shadow_ptr_mask;

// Instrumented code may set this value in terms of -dfsan-track-origins.
// * undefined or 0: do not track origins.
// * 1: track origins at memory store operations.
// * 2: TODO: track origins at memory store operations and callsites.
extern "C" SANITIZER_WEAK_ATTRIBUTE const int __dfsan_track_origins;

// Returns the origin-tracking mode, or 0 when the weak symbol above was not
// emitted by the instrumentation (address-of test detects the missing weak
// definition).
int __dfsan_get_track_origins() {
  return &__dfsan_track_origins ? __dfsan_track_origins : 0;
}

// On Linux/x86_64, memory is laid out as follows:
//
// +--------------------+ 0x800000000000 (top of memory)
// | application memory |
// +--------------------+ 0x700000008000 (kAppAddr)
// |                    |
// |       unused       |
// |                    |
// +--------------------+ 0x300200000000 (kUnusedAddr)
// |    union table     |
// +--------------------+ 0x300000000000 (kUnionTableAddr)
// |       origin       |
// +--------------------+ 0x200000000000 (kOriginAddr)
// |   shadow memory    |
// +--------------------+ 0x000000010000 (kShadowAddr)
// | reserved by kernel |
// +--------------------+ 0x000000000000
//
// To derive a shadow memory address from an application memory address,
// bits 44-46 are cleared to bring the address into the range
// [0x000000008000,0x100000000000).
Then the address is shifted left by 1 to
// account for the double byte representation of shadow labels and move the
// address into the shadow memory range. See the function shadow_for below.

// On Linux/MIPS64, memory is laid out as follows:
//
// +--------------------+ 0x10000000000 (top of memory)
// | application memory |
// +--------------------+ 0xF000008000 (kAppAddr)
// |                    |
// |       unused       |
// |                    |
// +--------------------+ 0x2200000000 (kUnusedAddr)
// |    union table     |
// +--------------------+ 0x2000000000 (kUnionTableAddr)
// |   shadow memory    |
// +--------------------+ 0x0000010000 (kShadowAddr)
// | reserved by kernel |
// +--------------------+ 0x0000000000

// On Linux/AArch64 (39-bit VMA), memory is laid out as follows:
//
// +--------------------+ 0x8000000000 (top of memory)
// | application memory |
// +--------------------+ 0x7000008000 (kAppAddr)
// |                    |
// |       unused       |
// |                    |
// +--------------------+ 0x1200000000 (kUnusedAddr)
// |    union table     |
// +--------------------+ 0x1000000000 (kUnionTableAddr)
// |   shadow memory    |
// +--------------------+ 0x0000010000 (kShadowAddr)
// | reserved by kernel |
// +--------------------+ 0x0000000000

// On Linux/AArch64 (42-bit VMA), memory is laid out as follows:
//
// +--------------------+ 0x40000000000 (top of memory)
// | application memory |
// +--------------------+ 0x3ff00008000 (kAppAddr)
// |                    |
// |       unused       |
// |                    |
// +--------------------+ 0x1200000000 (kUnusedAddr)
// |    union table     |
// +--------------------+ 0x8000000000 (kUnionTableAddr)
// |   shadow memory    |
// +--------------------+ 0x0000010000 (kShadowAddr)
// | reserved by kernel |
// +--------------------+ 0x0000000000

// On Linux/AArch64 (48-bit VMA), memory is laid out as follows:
//
// +--------------------+ 0x1000000000000 (top of
memory)
// | application memory |
// +--------------------+ 0xffff00008000 (kAppAddr)
// |       unused       |
// +--------------------+ 0xaaaab0000000 (top of PIE address)
// |  application PIE   |
// +--------------------+ 0xaaaaa0000000 (base of PIE address)
// |                    |
// |       unused       |
// |                    |
// +--------------------+ 0x1200000000 (kUnusedAddr)
// |    union table     |
// +--------------------+ 0x8000000000 (kUnionTableAddr)
// |   shadow memory    |
// +--------------------+ 0x0000010000 (kShadowAddr)
// | reserved by kernel |
// +--------------------+ 0x0000000000

// 2-D table mapping an ordered pair of labels to the label that represents
// their union (0 = not yet computed).
typedef atomic_dfsan_label dfsan_union_table_t[kNumLabels][kNumLabels];

#ifdef DFSAN_RUNTIME_VMA
// Runtime detected VMA size.
int __dfsan::vmaSize;
#endif

// First address above the union table; everything from here up to kAppAddr is
// unused.
static uptr UnusedAddr() {
  return UnionTableAddr() + sizeof(dfsan_union_table_t);
}

// Returns the union-table slot for the pair (l1, l2).
static atomic_dfsan_label *union_table(dfsan_label l1, dfsan_label l2) {
  return &(*(dfsan_union_table_t *) UnionTableAddr())[l1][l2];
}

// Checks we do not run out of labels.
static void dfsan_check_label(dfsan_label label) {
  if (label == kInitializingLabel) {
    Report("FATAL: DataFlowSanitizer: out of labels\n");
    Die();
  }
}

// Resolves the union of two unequal labels. Nonequality is a precondition for
// this function (the instrumentation pass inlines the equality test).
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
dfsan_label __dfsan_union(dfsan_label l1, dfsan_label l2) {
  DCHECK_NE(l1, l2);

  // Label 0 is "untainted"; union with it is the identity.
  if (l1 == 0)
    return l2;
  if (l2 == 0)
    return l1;

  // If no labels have been created, yet l1 and l2 are non-zero, we are using
  // fast16labels mode: labels are bitmasks, so union is bitwise OR.
  if (atomic_load(&__dfsan_last_label, memory_order_relaxed) == 0)
    return l1 | l2;

  // Canonicalize the pair so l1 < l2; the table is keyed on ordered pairs.
  if (l1 > l2)
    Swap(l1, l2);

  atomic_dfsan_label *table_ent = union_table(l1, l2);
  // We need to deal with the case where two threads concurrently request
  // a union of the same pair of labels. If the table entry is uninitialized,
  // (i.e. 0) use a compare-exchange to set the entry to kInitializingLabel
  // (i.e. -1) to mark that we are initializing it.
  dfsan_label label = 0;
  if (atomic_compare_exchange_strong(table_ent, &label, kInitializingLabel,
                                     memory_order_acquire)) {
    // Check whether l2 subsumes l1. We don't need to check whether l1
    // subsumes l2 because we are guaranteed here that l1 < l2, and (at least
    // in the cases we are interested in) a label may only subsume labels
    // created earlier (i.e. with a lower numerical value).
    if (__dfsan_label_info[l2].l1 == l1 ||
        __dfsan_label_info[l2].l2 == l1) {
      label = l2;
    } else {
      // Allocate a fresh union label recording its two operands.
      label =
        atomic_fetch_add(&__dfsan_last_label, 1, memory_order_relaxed) + 1;
      dfsan_check_label(label);
      __dfsan_label_info[label].l1 = l1;
      __dfsan_label_info[label].l2 = l2;
    }
    // Publish the result; release pairs with the acquire loads below.
    atomic_store(table_ent, label, memory_order_release);
  } else if (label == kInitializingLabel) {
    // Another thread is initializing the entry. Wait until it is finished.
    do {
      internal_sched_yield();
      label = atomic_load(table_ent, memory_order_acquire);
    } while (label == kInitializingLabel);
  }
  return label;
}

// Computes the union of the n labels in the shadow range ls; used by
// instrumentation for multi-byte loads.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
dfsan_label __dfsan_union_load(const dfsan_label *ls, uptr n) {
  dfsan_label label = ls[0];
  for (uptr i = 1; i != n; ++i) {
    dfsan_label next_label = ls[i];
    if (label != next_label)
      label = __dfsan_union(label, next_label);
  }
  return label;
}

// fast16labels variant of __dfsan_union_load: union is a plain bitwise OR.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
dfsan_label __dfsan_union_load_fast16labels(const dfsan_label *ls, uptr n) {
  dfsan_label label = ls[0];
  for (uptr i = 1; i != n; ++i)
    label |= ls[i];
  return label;
}

// Return the union of all the n labels from addr at the high 32 bit, and the
// origin of the first taint byte at the low 32 bit.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE u64
__dfsan_load_label_and_origin(const void *addr, uptr n) {
  dfsan_label label = 0;
  u64 ret = 0;
  uptr p = (uptr)addr;
  dfsan_label *s = shadow_for((void *)p);
  for (uptr i = 0; i < n; ++i) {
    dfsan_label l = s[i];
    if (!l)
      continue;
    label |= l;
    // Keep only the origin of the first tainted byte.
    if (!ret)
      ret = *(dfsan_origin *)origin_for((void *)(p + i));
  }
  return ret | (u64)label << 32;
}

// Called when instrumented code reaches an uninstrumented function; labels do
// not propagate through such calls.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __dfsan_unimplemented(char *fname) {
  if (flags().warn_unimplemented)
    Report("WARNING: DataFlowSanitizer: call to uninstrumented function %s\n",
           fname);
}

// Use '-mllvm -dfsan-debug-nonzero-labels' and break on this function
// to try to figure out where labels are being introduced in a nominally
// label-free program.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_nonzero_label() {
  if (flags().warn_nonzero_labels)
    Report("WARNING: DataFlowSanitizer: saw nonzero label\n");
}

// Indirect call to an uninstrumented vararg function.
We don't have a way of
// handling these at the moment.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
__dfsan_vararg_wrapper(const char *fname) {
  Report("FATAL: DataFlowSanitizer: unsupported indirect call to vararg "
         "function %s\n", fname);
  Die();
}

// Like __dfsan_union, but for use from the client or custom functions. Hence
// the equality comparison is done here before calling __dfsan_union.
SANITIZER_INTERFACE_ATTRIBUTE dfsan_label
dfsan_union(dfsan_label l1, dfsan_label l2) {
  if (l1 == l2)
    return l1;
  return __dfsan_union(l1, l2);
}

// Allocates a fresh base label carrying the given description and user data.
// Dies (via dfsan_check_label) if the 16-bit label space is exhausted.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
dfsan_label dfsan_create_label(const char *desc, void *userdata) {
  dfsan_label label =
    atomic_fetch_add(&__dfsan_last_label, 1, memory_order_relaxed) + 1;
  dfsan_check_label(label);
  // l1 == l2 == 0 marks a base (non-union) label.
  __dfsan_label_info[label].l1 = __dfsan_label_info[label].l2 = 0;
  __dfsan_label_info[label].desc = desc;
  __dfsan_label_info[label].userdata = userdata;
  return label;
}

// Return the origin of the first taint byte in the size bytes from the address
// addr. Returns 0 if no byte in the range is tainted.
static dfsan_origin GetOriginIfTainted(uptr addr, uptr size) {
  for (uptr i = 0; i < size; ++i, ++addr) {
    dfsan_label *s = shadow_for((void *)addr);
    if (!is_shadow_addr_valid((uptr)s)) {
      // The current DFSan memory layout is not always correct. For example,
      // addresses (0, 0x10000) are mapped to (0, 0x10000). Before fixing the
      // issue, we ignore such addresses.
      continue;
    }
    if (*s)
      return *(dfsan_origin *)origin_for((void *)addr);
  }
  return 0;
}

// For platforms which support slow unwinder only, we need to restrict the store
// context size to 1, basically only storing the current pc, because the slow
// unwinder which is based on libunwind is not async signal safe and causes
// random freezes in forking applications as well as in signal handlers.
// DFSan supports only Linux. So we do not restrict the store context size.
#define GET_STORE_STACK_TRACE_PC_BP(pc, bp) \
  BufferedStackTrace stack;                 \
  stack.Unwind(pc, bp, nullptr, true, flags().store_context_size);

#define PRINT_CALLER_STACK_TRACE        \
  {                                     \
    GET_CALLER_PC_BP_SP;                \
    (void)sp;                           \
    GET_STORE_STACK_TRACE_PC_BP(pc, bp) \
    stack.Print();                      \
  }

// Return a chain with the previous ID id and the current stack.
// from_init = true if this is the first chain of an origin tracking path.
static u32 ChainOrigin(u32 id, StackTrace *stack, bool from_init = false) {
  // StackDepot is not async signal safe. Do not create new chains in a signal
  // handler.
  DFsanThread *t = GetCurrentThread();
  if (t && t->InSignalHandler())
    return id;

  // As an optimization the origin of an application byte is updated only when
  // its shadow is non-zero. Because we are only interested in the origins of
  // taint labels, it does not matter what origin a zero label has. This reduces
  // memory write cost. MSan does similar optimization. The following invariant
  // may not hold because of some bugs. We check the invariant to help debug.
  if (!from_init && id == 0 && flags().check_origin_invariant) {
    Printf("  DFSan found invalid origin invariant\n");
    PRINT_CALLER_STACK_TRACE
  }

  Origin o = Origin::FromRawId(id);
  stack->tag = StackTrace::TAG_UNKNOWN;
  Origin chained = Origin::CreateChainedOrigin(o, stack);
  return chained.raw_id();
}

// One dfsan_origin (4 bytes) covers a 4-byte granule of application memory.
static const uptr kOriginAlign = sizeof(dfsan_origin);
static const uptr kOriginAlignMask = ~(kOriginAlign - 1UL);

// Rounds u up to the next origin-granule boundary.
static uptr AlignUp(uptr u) {
  return (u + kOriginAlign - 1) & kOriginAlignMask;
}

// Rounds u down to the previous origin-granule boundary.
static uptr AlignDown(uptr u) { return u & kOriginAlignMask; }

// If any of the size bytes at src is tainted, chains that byte's origin with
// the current stack and writes the result into dst's origin slot.
static void ChainAndWriteOriginIfTainted(uptr src, uptr size, uptr dst,
                                         StackTrace *stack) {
  dfsan_origin o = GetOriginIfTainted(src, size);
  if (o) {
    o = ChainOrigin(o, stack);
    *(dfsan_origin *)origin_for((void *)dst) = o;
  }
}

// Copy the origins of the size bytes from src to dst. The source and target
// memory ranges cannot be overlapped. This is used by memcpy. stack records the
// stack trace of the memcpy. When dst and src are not 4-byte aligned properly,
// origins at the unaligned address boundaries may be overwritten because four
// contiguous bytes share the same origin.
static void CopyOrigin(const void *dst, const void *src, uptr size,
                       StackTrace *stack) {
  uptr d = (uptr)dst;
  uptr beg = AlignDown(d);
  // Copy left unaligned origin if that memory is tainted.
  if (beg < d) {
    ChainAndWriteOriginIfTainted((uptr)src, beg + kOriginAlign - d, beg, stack);
    beg += kOriginAlign;
  }

  uptr end = AlignDown(d + size);
  // If both ends fall into the same 4-byte slot, we are done.
  if (end < beg)
    return;

  // Copy right unaligned origin if that memory is tainted.
  if (end < d + size)
    ChainAndWriteOriginIfTainted((uptr)src + (end - d), (d + size) - end, end,
                                 stack);

  if (beg >= end)
    return;

  // Align src up.
  uptr s = AlignUp((uptr)src);
  dfsan_origin *src_o = (dfsan_origin *)origin_for((void *)s);
  u64 *src_s = (u64 *)shadow_for((void *)s);
  dfsan_origin *src_end = (dfsan_origin *)origin_for((void *)(s + (end - beg)));
  dfsan_origin *dst_o = (dfsan_origin *)origin_for((void *)beg);
  // Cache the last chained origin so runs of identical source origins are
  // chained only once.
  dfsan_origin last_src_o = 0;
  dfsan_origin last_dst_o = 0;
  for (; src_o < src_end; ++src_o, ++src_s, ++dst_o) {
    // *src_s reads the granule's shadow labels in one u64; zero means the
    // granule is untainted, so its origin need not be copied.
    if (!*src_s)
      continue;
    if (*src_o != last_src_o) {
      last_src_o = *src_o;
      last_dst_o = ChainOrigin(last_src_o, stack);
    }
    *dst_o = last_dst_o;
  }
}

// Copy the origins of the size bytes from src to dst. The source and target
// memory ranges may be overlapped. So the copy is done in a reverse order.
// This is used by memmove. stack records the stack trace of the memmove.
static void ReverseCopyOrigin(const void *dst, const void *src, uptr size,
                              StackTrace *stack) {
  uptr d = (uptr)dst;
  uptr end = AlignDown(d + size);

  // Copy right unaligned origin if that memory is tainted.
  if (end < d + size)
    ChainAndWriteOriginIfTainted((uptr)src + (end - d), (d + size) - end, end,
                                 stack);

  uptr beg = AlignDown(d);

  if (beg + kOriginAlign < end) {
    // Align src up.
    uptr s = AlignUp((uptr)src);
    // Walk the aligned interior from the highest granule down to s.
    dfsan_origin *src =
      (dfsan_origin *)origin_for((void *)(s + end - beg - kOriginAlign));
    u64 *src_s = (u64 *)shadow_for((void *)(s + end - beg - kOriginAlign));
    dfsan_origin *src_begin = (dfsan_origin *)origin_for((void *)s);
    dfsan_origin *dst =
      (dfsan_origin *)origin_for((void *)(end - kOriginAlign));
    dfsan_origin src_o = 0;
    dfsan_origin dst_o = 0;
    for (; src >= src_begin; --src, --src_s, --dst) {
      // Skip granules whose shadow is entirely zero (untainted).
      if (!*src_s)
        continue;
      if (*src != src_o) {
        src_o = *src;
        dst_o = ChainOrigin(src_o, stack);
      }
      *dst = dst_o;
    }
  }

  // Copy left unaligned origin if that memory is tainted.
  if (beg < d)
    ChainAndWriteOriginIfTainted((uptr)src, beg + kOriginAlign - d, beg, stack);
}

// Copy or move the origins of the len bytes from src to dst. The source and
// target memory ranges may or may not be overlapped. This is used by memory
// transfer operations. stack records the stack trace of the memory transfer
// operation.
static void MoveOrigin(const void *dst, const void *src, uptr size,
                       StackTrace *stack) {
  // Ignore the transfer when either range lacks a valid shadow mapping.
  if (!has_valid_shadow_addr(dst) ||
      !has_valid_shadow_addr((void *)((uptr)dst + size)) ||
      !has_valid_shadow_addr(src) ||
      !has_valid_shadow_addr((void *)((uptr)src + size))) {
    return;
  }
  // If destination origin range overlaps with source origin range, move
  // origins by copying origins in a reverse order; otherwise, copy origins in
  // a normal order. The orders of origin transfer are consistent with the
  // orders of how memcpy and memmove transfer user data.
  uptr src_aligned_beg = reinterpret_cast<uptr>(src) & ~3UL;
  uptr src_aligned_end = (reinterpret_cast<uptr>(src) + size) & ~3UL;
  uptr dst_aligned_beg = reinterpret_cast<uptr>(dst) & ~3UL;
  if (dst_aligned_beg < src_aligned_end && dst_aligned_beg >= src_aligned_beg)
    return ReverseCopyOrigin(dst, src, size, stack);
  return CopyOrigin(dst, src, size, stack);
}

// Set the size bytes from the address dst to be the origin value.
static void SetOrigin(const void *dst, uptr size, u32 origin) {
  if (size == 0)
    return;

  // Origin mapping is 4 bytes per 4 bytes of application memory.
  // Here we extend the range such that its left and right bounds are both
  // 4 byte aligned.
  uptr x = unaligned_origin_for((uptr)dst);
  uptr beg = AlignDown(x);
  uptr end = AlignUp(x + size);  // align up.
  u64 origin64 = ((u64)origin << 32) | origin;
  // This is like memset, but the value is 32-bit. We unroll by 2 to write
  // 64 bits at once. May want to unroll further to get 128-bit stores.
  if (beg & 7ULL) {
    // Leading 4-byte slot that is not 8-byte aligned.
    if (*(u32 *)beg != origin)
      *(u32 *)beg = origin;
    beg += 4;
  }
  for (uptr addr = beg; addr < (end & ~7UL); addr += 8) {
    // Skip stores that would not change memory, to avoid un-sharing
    // copy-on-write pages.
    if (*(u64 *)addr == origin64)
      continue;
    *(u64 *)addr = origin64;
  }
  // Trailing 4-byte slot that is not 8-byte aligned.
  if (end & 7ULL)
    if (*(u32 *)(end - kOriginAlign) != origin)
      *(u32 *)(end - kOriginAlign) = origin;
}

// Fills size shadow slots starting at shadow_addr with label, eliding stores
// that would not change memory.
static void WriteShadowIfDifferent(dfsan_label label, uptr shadow_addr,
                                   uptr size) {
  dfsan_label *labelp = (dfsan_label *)shadow_addr;
  for (; size != 0; --size, ++labelp) {
    // Don't write the label if it is already the value we need it to be.
    // In a program where most addresses are not labeled, it is common that
    // a page of shadow memory is entirely zeroed. The Linux copy-on-write
    // implementation will share all of the zeroed pages, making a copy of a
    // page when any value is written. The un-sharing will happen even if
    // the value written does not change the value in memory. Avoiding the
    // write when both |label| and |*labelp| are zero dramatically reduces
    // the amount of real memory used by large programs.
    if (label == *labelp)
      continue;

    *labelp = label;
  }
}

// Return a new origin chain with the previous ID id and the current stack
// trace.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_origin
__dfsan_chain_origin(dfsan_origin id) {
  GET_CALLER_PC_BP_SP;
  (void)sp;
  GET_STORE_STACK_TRACE_PC_BP(pc, bp);
  return ChainOrigin(id, &stack);
}

// Copy or move the origins of the len bytes from src to dst.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_mem_origin_transfer(
    const void *dst, const void *src, uptr len) {
  if (src == dst)
    return;
  GET_CALLER_PC_BP;
  GET_STORE_STACK_TRACE_PC_BP(pc, bp);
  MoveOrigin(dst, src, len, &stack);
}

// Public alias of __dfsan_mem_origin_transfer for the dfsan_ interface.
SANITIZER_INTERFACE_ATTRIBUTE void dfsan_mem_origin_transfer(const void *dst,
                                                             const void *src,
                                                             uptr len) {
  __dfsan_mem_origin_transfer(dst, src, len);
}

// If the label s is tainted, set the size bytes from the address p to be a new
// origin chain with the previous ID o and the current stack trace. This is
// used by instrumentation to reduce code size when too much code is inserted.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_maybe_store_origin(
    u16 s, void *p, uptr size, dfsan_origin o) {
  if (UNLIKELY(s)) {
    GET_CALLER_PC_BP_SP;
    (void)sp;
    GET_STORE_STACK_TRACE_PC_BP(pc, bp);
    SetOrigin(p, size, ChainOrigin(o, &stack));
  }
}

// Releases the pages within the origin address range, and sets the origin
// addresses not on the pages to be 0.
static void ReleaseOrClearOrigins(void *addr, uptr size) {
  const uptr beg_origin_addr = (uptr)__dfsan::origin_for(addr);
  const void *end_addr = (void *)((uptr)addr + size);
  const uptr end_origin_addr = (uptr)__dfsan::origin_for(end_addr);
  const uptr page_size = GetPageSizeCached();
  const uptr beg_aligned = RoundUpTo(beg_origin_addr, page_size);
  const uptr end_aligned = RoundDownTo(end_origin_addr, page_size);

  // dfsan_set_label can be called from the following cases
  // 1) mapped ranges by new/delete and malloc/free. This case has origin memory
  // size > 50k, and happens less frequently.
  // 2) zero-filling internal data structures by utility libraries. This case
  // has origin memory size < 16k, and happens more often.
  // Set kNumPagesThreshold to be 4 to avoid releasing small pages.
  const int kNumPagesThreshold = 4;
  if (beg_aligned + kNumPagesThreshold * page_size >= end_aligned)
    return;

  // NOTE(review): the partial-page origin slots outside
  // [beg_aligned, end_aligned) are not explicitly zeroed here; presumably this
  // is fine because the caller also zeroes the corresponding shadow, and
  // origins of zero labels are ignored — confirm against ChainOrigin's
  // invariant comment.
  ReleaseMemoryPagesToOS(beg_aligned, end_aligned);
}

// Writes the shadow (and, when origin tracking is enabled, the origin) for the
// size bytes at addr. For label 0, large shadow ranges are released back to
// the OS instead of being stored byte by byte.
void SetShadow(dfsan_label label, void *addr, uptr size, dfsan_origin origin) {
  const uptr beg_shadow_addr = (uptr)__dfsan::shadow_for(addr);

  if (0 != label) {
    WriteShadowIfDifferent(label, beg_shadow_addr, size);
    if (__dfsan_get_track_origins())
      SetOrigin(addr, size, origin);
    return;
  }

  if (__dfsan_get_track_origins())
    ReleaseOrClearOrigins(addr, size);

  // If label is 0, releases the pages within the shadow address range, and sets
  // the shadow addresses not on the pages to be 0.
  const void *end_addr = (void *)((uptr)addr + size);
  const uptr end_shadow_addr = (uptr)__dfsan::shadow_for(end_addr);
  const uptr page_size = GetPageSizeCached();
  const uptr beg_aligned = RoundUpTo(beg_shadow_addr, page_size);
  const uptr end_aligned = RoundDownTo(end_shadow_addr, page_size);

  // dfsan_set_label can be called from the following cases
  // 1) mapped ranges by new/delete and malloc/free. This case has shadow memory
  // size > 100k, and happens less frequently.
  // 2) zero-filling internal data structures by utility libraries. This case
  // has shadow memory size < 32k, and happens more often.
  // Set kNumPagesThreshold to be 8 to avoid releasing small pages.
  const int kNumPagesThreshold = 8;
  if (beg_aligned + kNumPagesThreshold * page_size >= end_aligned)
    return WriteShadowIfDifferent(label, beg_shadow_addr, size);

  // Zero the partial pages at both ends and drop the whole pages in between.
  WriteShadowIfDifferent(label, beg_shadow_addr, beg_aligned - beg_shadow_addr);
  ReleaseMemoryPagesToOS(beg_aligned, end_aligned);
  WriteShadowIfDifferent(label, end_aligned, end_shadow_addr - end_aligned);
}

// Compiler interface: store label (and origin) for the size bytes at addr.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_set_label(
    dfsan_label label, dfsan_origin origin, void *addr, uptr size) {
  SetShadow(label, addr, size, origin);
}

// Public interface: label the size bytes at addr, starting a fresh origin
// chain when origin tracking is enabled.
SANITIZER_INTERFACE_ATTRIBUTE
void dfsan_set_label(dfsan_label label, void *addr, uptr size) {
  dfsan_origin init_origin = 0;
  if (label && __dfsan_get_track_origins()) {
    GET_CALLER_PC_BP;
    GET_STORE_STACK_TRACE_PC_BP(pc, bp);
    init_origin = ChainOrigin(0, &stack, true);
  }
  SetShadow(label, addr, size, init_origin);
}

// Public interface: union label into the existing labels of the size bytes at
// addr. A zero label is a no-op.
SANITIZER_INTERFACE_ATTRIBUTE
void dfsan_add_label(dfsan_label label, void *addr, uptr size) {
  if (0 == label)
    return;

  if (__dfsan_get_track_origins()) {
    GET_CALLER_PC_BP;
    GET_STORE_STACK_TRACE_PC_BP(pc, bp);
    dfsan_origin init_origin = ChainOrigin(0, &stack, true);
    SetOrigin(addr, size, init_origin);
  }

  for (dfsan_label *labelp = shadow_for(addr); size != 0; --size, ++labelp)
    if (*labelp != label)
      *labelp = __dfsan_union(*labelp, label);
}

// Unlike the other dfsan interface functions the behavior of this function
// depends on the label of one of its arguments. Hence it is implemented as a
// custom function.
696 extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_label 697 __dfsw_dfsan_get_label(long data, dfsan_label data_label, 698 dfsan_label *ret_label) { 699 *ret_label = 0; 700 return data_label; 701 } 702 703 extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_label __dfso_dfsan_get_label( 704 long data, dfsan_label data_label, dfsan_label *ret_label, 705 dfsan_origin data_origin, dfsan_origin *ret_origin) { 706 *ret_label = 0; 707 *ret_origin = 0; 708 return data_label; 709 } 710 711 extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_origin __dfso_dfsan_get_origin( 712 long data, dfsan_label data_label, dfsan_label *ret_label, 713 dfsan_origin data_origin, dfsan_origin *ret_origin) { 714 *ret_label = 0; 715 *ret_origin = 0; 716 return data_origin; 717 } 718 719 SANITIZER_INTERFACE_ATTRIBUTE dfsan_label 720 dfsan_read_label(const void *addr, uptr size) { 721 if (size == 0) 722 return 0; 723 return __dfsan_union_load(shadow_for(addr), size); 724 } 725 726 SANITIZER_INTERFACE_ATTRIBUTE dfsan_origin 727 dfsan_read_origin_of_first_taint(const void *addr, uptr size) { 728 return GetOriginIfTainted((uptr)addr, size); 729 } 730 731 extern "C" SANITIZER_INTERFACE_ATTRIBUTE 732 const struct dfsan_label_info *dfsan_get_label_info(dfsan_label label) { 733 return &__dfsan_label_info[label]; 734 } 735 736 extern "C" SANITIZER_INTERFACE_ATTRIBUTE int 737 dfsan_has_label(dfsan_label label, dfsan_label elem) { 738 if (label == elem) 739 return true; 740 const dfsan_label_info *info = dfsan_get_label_info(label); 741 if (info->l1 != 0) { 742 return dfsan_has_label(info->l1, elem) || dfsan_has_label(info->l2, elem); 743 } else { 744 return false; 745 } 746 } 747 748 extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_label 749 dfsan_has_label_with_desc(dfsan_label label, const char *desc) { 750 const dfsan_label_info *info = dfsan_get_label_info(label); 751 if (info->l1 != 0) { 752 return dfsan_has_label_with_desc(info->l1, desc) || 753 dfsan_has_label_with_desc(info->l2, desc); 754 } else { 755 
return internal_strcmp(desc, info->desc) == 0; 756 } 757 } 758 759 extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr 760 dfsan_get_label_count(void) { 761 dfsan_label max_label_allocated = 762 atomic_load(&__dfsan_last_label, memory_order_relaxed); 763 764 return static_cast<uptr>(max_label_allocated); 765 } 766 767 extern "C" SANITIZER_INTERFACE_ATTRIBUTE void 768 dfsan_dump_labels(int fd) { 769 dfsan_label last_label = 770 atomic_load(&__dfsan_last_label, memory_order_relaxed); 771 for (uptr l = 1; l <= last_label; ++l) { 772 char buf[64]; 773 internal_snprintf(buf, sizeof(buf), "%u %u %u ", l, 774 __dfsan_label_info[l].l1, __dfsan_label_info[l].l2); 775 WriteToFile(fd, buf, internal_strlen(buf)); 776 if (__dfsan_label_info[l].l1 == 0 && __dfsan_label_info[l].desc) { 777 WriteToFile(fd, __dfsan_label_info[l].desc, 778 internal_strlen(__dfsan_label_info[l].desc)); 779 } 780 WriteToFile(fd, "\n", 1); 781 } 782 } 783 784 class Decorator : public __sanitizer::SanitizerCommonDecorator { 785 public: 786 Decorator() : SanitizerCommonDecorator() {} 787 const char *Origin() const { return Magenta(); } 788 }; 789 790 extern "C" SANITIZER_INTERFACE_ATTRIBUTE void dfsan_print_origin_trace( 791 const void *addr, const char *description) { 792 Decorator d; 793 794 if (!__dfsan_get_track_origins()) { 795 Printf( 796 " %sDFSan: origin tracking is not enabled. Did you specify the " 797 "-dfsan-track-origins=1 option?%s\n", 798 d.Warning(), d.Default()); 799 return; 800 } 801 802 const dfsan_label label = *__dfsan::shadow_for(addr); 803 if (!label) { 804 Printf(" %sDFSan: no tainted value at %x%s\n", d.Warning(), addr, 805 d.Default()); 806 return; 807 } 808 809 const dfsan_origin origin = *__dfsan::origin_for(addr); 810 811 Printf(" %sTaint value 0x%x (at %p) origin tracking (%s)%s\n", d.Origin(), 812 label, addr, description ? 
description : "", d.Default()); 813 Origin o = Origin::FromRawId(origin); 814 bool found = false; 815 while (o.isChainedOrigin()) { 816 StackTrace stack; 817 dfsan_origin origin_id = o.raw_id(); 818 o = o.getNextChainedOrigin(&stack); 819 if (o.isChainedOrigin()) 820 Printf(" %sOrigin value: 0x%x, Taint value was stored to memory at%s\n", 821 d.Origin(), origin_id, d.Default()); 822 else 823 Printf(" %sOrigin value: 0x%x, Taint value was created at%s\n", 824 d.Origin(), origin_id, d.Default()); 825 stack.Print(); 826 found = true; 827 } 828 if (!found) 829 Printf( 830 " %sTaint value 0x%x (at %p) has invalid origin tracking. This can " 831 "be a DFSan bug.%s\n", 832 d.Warning(), label, addr, d.Default()); 833 } 834 835 extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_origin 836 dfsan_get_init_origin(const void *addr) { 837 if (!__dfsan_get_track_origins()) 838 return 0; 839 840 const dfsan_label label = *__dfsan::shadow_for(addr); 841 if (!label) 842 return 0; 843 844 const dfsan_origin origin = *__dfsan::origin_for(addr); 845 846 Origin o = Origin::FromRawId(origin); 847 dfsan_origin origin_id = o.raw_id(); 848 while (o.isChainedOrigin()) { 849 StackTrace stack; 850 o = o.getNextChainedOrigin(&stack); 851 } 852 return origin_id; 853 } 854 855 #define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \ 856 BufferedStackTrace stack; \ 857 stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_fatal); 858 859 void __sanitizer::BufferedStackTrace::UnwindImpl(uptr pc, uptr bp, 860 void *context, 861 bool request_fast, 862 u32 max_depth) { 863 using namespace __dfsan; 864 DFsanThread *t = GetCurrentThread(); 865 if (!t || !StackTrace::WillUseFastUnwind(request_fast)) { 866 return Unwind(max_depth, pc, bp, context, 0, 0, false); 867 } 868 Unwind(max_depth, pc, bp, nullptr, t->stack_top(), t->stack_bottom(), true); 869 } 870 871 extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_print_stack_trace() { 872 GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), 
GET_CURRENT_FRAME()); 873 stack.Print(); 874 } 875 876 void Flags::SetDefaults() { 877 #define DFSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; 878 #include "dfsan_flags.inc" 879 #undef DFSAN_FLAG 880 } 881 882 static void RegisterDfsanFlags(FlagParser *parser, Flags *f) { 883 #define DFSAN_FLAG(Type, Name, DefaultValue, Description) \ 884 RegisterFlag(parser, #Name, Description, &f->Name); 885 #include "dfsan_flags.inc" 886 #undef DFSAN_FLAG 887 } 888 889 static void InitializeFlags() { 890 SetCommonFlagsDefaults(); 891 flags().SetDefaults(); 892 893 FlagParser parser; 894 RegisterCommonFlags(&parser); 895 RegisterDfsanFlags(&parser, &flags()); 896 parser.ParseStringFromEnv("DFSAN_OPTIONS"); 897 InitializeCommonFlags(); 898 if (Verbosity()) ReportUnrecognizedFlags(); 899 if (common_flags()->help) parser.PrintFlagDescriptions(); 900 } 901 902 SANITIZER_INTERFACE_ATTRIBUTE 903 void dfsan_clear_arg_tls(uptr offset, uptr size) { 904 internal_memset((void *)((uptr)__dfsan_arg_tls + offset), 0, size); 905 } 906 907 SANITIZER_INTERFACE_ATTRIBUTE 908 void dfsan_clear_thread_local_state() { 909 internal_memset(__dfsan_arg_tls, 0, sizeof(__dfsan_arg_tls)); 910 internal_memset(__dfsan_retval_tls, 0, sizeof(__dfsan_retval_tls)); 911 912 if (__dfsan_get_track_origins()) { 913 internal_memset(__dfsan_arg_origin_tls, 0, sizeof(__dfsan_arg_origin_tls)); 914 internal_memset(&__dfsan_retval_origin_tls, 0, 915 sizeof(__dfsan_retval_origin_tls)); 916 } 917 } 918 919 static void InitializePlatformEarly() { 920 AvoidCVE_2016_2143(); 921 #ifdef DFSAN_RUNTIME_VMA 922 __dfsan::vmaSize = 923 (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1); 924 if (__dfsan::vmaSize == 39 || __dfsan::vmaSize == 42 || 925 __dfsan::vmaSize == 48) { 926 __dfsan_shadow_ptr_mask = ShadowMask(); 927 } else { 928 Printf("FATAL: DataFlowSanitizer: unsupported VMA range\n"); 929 Printf("FATAL: Found %d - Supported 39, 42, and 48\n", __dfsan::vmaSize); 930 Die(); 931 } 932 #endif 933 } 934 
// atexit()/die-callback handler: when the dump_labels_at_exit flag names a
// file, dumps all allocated labels there before the process exits.
static void dfsan_fini() {
  if (internal_strcmp(flags().dump_labels_at_exit, "") != 0) {
    fd_t fd = OpenFile(flags().dump_labels_at_exit, WrOnly);
    if (fd == kInvalidFd) {
      // Best-effort: warn and skip the dump rather than aborting shutdown.
      Report("WARNING: DataFlowSanitizer: unable to open output file %s\n",
             flags().dump_labels_at_exit);
      return;
    }

    Report("INFO: DataFlowSanitizer: dumping labels to %s\n",
           flags().dump_labels_at_exit);
    dfsan_dump_labels(fd);
    CloseFile(fd);
  }
}

// (Re)maps the shadow region with fresh zero-filled pages, discarding any
// existing shadow state.  Dies if the fixed mapping cannot be established.
extern "C" void dfsan_flush() {
  if (!MmapFixedSuperNoReserve(ShadowAddr(), UnusedAddr() - ShadowAddr()))
    Die();
}

// Runtime entry point, invoked from .preinit_array (see below) before any
// instrumented code runs: parses flags, maps shadow memory, installs
// interceptors and shutdown callbacks, and brings up the main thread.
static void dfsan_init(int argc, char **argv, char **envp) {
  InitializeFlags();

  ::InitializePlatformEarly();

  dfsan_flush();
  if (common_flags()->use_madv_dontdump)
    // Keep the (huge, mostly-zero) shadow region out of core dumps.
    DontDumpShadowMemory(ShadowAddr(), UnusedAddr() - ShadowAddr());

  // Protect the region of memory we don't use, to preserve the one-to-one
  // mapping from application to shadow memory. But if ASLR is disabled, Linux
  // will load our executable in the middle of our unused region. This mostly
  // works so long as the program doesn't use too much memory. We support this
  // case by disabling memory protection when ASLR is disabled.
  uptr init_addr = (uptr)&dfsan_init;
  if (!(init_addr >= UnusedAddr() && init_addr < AppAddr()))
    MmapFixedNoAccess(UnusedAddr(), AppAddr() - UnusedAddr());

  InitializeInterceptors();

  // Register the fini callback to run when the program terminates successfully
  // or it is killed by the runtime.
  Atexit(dfsan_fini);
  AddDieCallback(dfsan_fini);

  // Set up threads: install the TSD destructor, then create and start the
  // main thread's DFsanThread object so stack bounds are known to the
  // unwinder.
  DFsanTSDInit(DFsanTSDDtor);
  DFsanThread *main_thread = DFsanThread::Create(nullptr, nullptr, nullptr);
  SetCurrentThread(main_thread);
  main_thread->ThreadStart();

  // Give the reserved all-ones label (kInitializingLabel, declared near the
  // top of this file) a printable description.
  __dfsan_label_info[kInitializingLabel].desc = "<init label>";
}

#if SANITIZER_CAN_USE_PREINIT_ARRAY
// Run dfsan_init as early as possible, ahead of the instrumented program's
// own constructors.
__attribute__((section(".preinit_array"), used))
static void (*dfsan_init_ptr)(int, char **, char **) = dfsan_init;
#endif