//===-- memprof_rtl.cpp --------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemProfiler, a memory profiler.
//
// Main file of the MemProf run-time library.
//===----------------------------------------------------------------------===//

#include "memprof_allocator.h"
#include "memprof_interceptors.h"
#include "memprof_interface_internal.h"
#include "memprof_internal.h"
#include "memprof_mapping.h"
#include "memprof_stack.h"
#include "memprof_stats.h"
#include "memprof_thread.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

#include <time.h>

uptr __memprof_shadow_memory_dynamic_address; // Global interface symbol.

// Allow the user to specify a profile output file via the binary.
SANITIZER_WEAK_ATTRIBUTE char __memprof_profile_filename[1];

namespace __memprof {

static void MemprofDie() {
  static atomic_uint32_t num_calls;
  if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) {
    // Don't die twice - run a busy loop.
    while (1) {
      internal_sched_yield();
    }
  }
  if (common_flags()->print_module_map >= 1)
    DumpProcessMap();
  if (flags()->unmap_shadow_on_exit) {
    if (kHighShadowEnd)
      UnmapOrDie((void *)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
  }
}

static void CheckUnwind() {
  GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check);
  stack.Print();
}

// -------------------------- Globals --------------------- {{{1
int memprof_inited;
int memprof_init_done;
bool memprof_init_is_running;
int memprof_timestamp_inited;
long memprof_init_timestamp_s;

uptr kHighMemEnd;

// -------------------------- Run-time entry ------------------- {{{1
// exported functions

#define MEMPROF_MEMORY_ACCESS_CALLBACK_BODY() __memprof::RecordAccess(addr);

#define MEMPROF_MEMORY_ACCESS_CALLBACK(type)                                   \
  extern "C" NOINLINE INTERFACE_ATTRIBUTE void __memprof_##type(uptr addr) {   \
    MEMPROF_MEMORY_ACCESS_CALLBACK_BODY()                                      \
  }

MEMPROF_MEMORY_ACCESS_CALLBACK(load)
MEMPROF_MEMORY_ACCESS_CALLBACK(store)

// Force the linker to keep the symbols for various MemProf interface
// functions. We want to keep those in the executable in order to let the
// instrumented dynamic libraries access the symbol even if it is not used by
// the executable itself. This should help if the build system is removing dead
// code at link time.
static NOINLINE void force_interface_symbols() {
  volatile int fake_condition = 0; // prevent dead condition elimination.
  // clang-format off
  switch (fake_condition) {
    case 1: __memprof_record_access(nullptr); break;
    case 2: __memprof_record_access_range(nullptr, 0); break;
  }
  // clang-format on
}

static void memprof_atexit() {
  Printf("MemProfiler exit stats:\n");
  __memprof_print_accumulated_stats();
}

static void InitializeHighMemEnd() {
  kHighMemEnd = GetMaxUserVirtualAddress();
  // Increase kHighMemEnd to make sure it's properly
  // aligned together with kHighMemBeg:
  kHighMemEnd |= (GetMmapGranularity() << SHADOW_SCALE) - 1;
}

void PrintAddressSpaceLayout() {
  if (kHighMemBeg) {
    Printf("|| `[%p, %p]` || HighMem ||\n", (void *)kHighMemBeg,
           (void *)kHighMemEnd);
    Printf("|| `[%p, %p]` || HighShadow ||\n", (void *)kHighShadowBeg,
           (void *)kHighShadowEnd);
  }
  Printf("|| `[%p, %p]` || ShadowGap ||\n", (void *)kShadowGapBeg,
         (void *)kShadowGapEnd);
  if (kLowShadowBeg) {
    Printf("|| `[%p, %p]` || LowShadow ||\n", (void *)kLowShadowBeg,
           (void *)kLowShadowEnd);
    Printf("|| `[%p, %p]` || LowMem ||\n", (void *)kLowMemBeg,
           (void *)kLowMemEnd);
  }
  Printf("MemToShadow(shadow): %p %p", (void *)MEM_TO_SHADOW(kLowShadowBeg),
         (void *)MEM_TO_SHADOW(kLowShadowEnd));
  if (kHighMemBeg) {
    Printf(" %p %p", (void *)MEM_TO_SHADOW(kHighShadowBeg),
           (void *)MEM_TO_SHADOW(kHighShadowEnd));
  }
  Printf("\n");
  Printf("malloc_context_size=%zu\n",
         (uptr)common_flags()->malloc_context_size);

  Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE);
  Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY);
  Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)SHADOW_OFFSET);
  CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7);
}

static void MemprofInitInternal() {
  if (LIKELY(memprof_inited))
    return;
  SanitizerToolName = "MemProfiler";
  CHECK(!memprof_init_is_running && "MemProf init calls itself!");
  memprof_init_is_running = true;

  CacheBinaryName();

  // Initialize flags. This must be done early, because most of the
  // initialization steps look at flags().
  InitializeFlags();

  AvoidCVE_2016_2143();

  SetMallocContextSize(common_flags()->malloc_context_size);

  InitializeHighMemEnd();

  // Make sure we are not statically linked.
  MemprofDoesNotSupportStaticLinkage();

  // Install tool-specific callbacks in sanitizer_common.
  AddDieCallback(MemprofDie);
  SetCheckUnwindCallback(CheckUnwind);

  // Use the profile name specified via the binary itself if it exists, and
  // hasn't been overridden by a flag at runtime.
  if (__memprof_profile_filename[0] != 0 && !common_flags()->log_path)
    __sanitizer_set_report_path(__memprof_profile_filename);
  else
    __sanitizer_set_report_path(common_flags()->log_path);

  __sanitizer::InitializePlatformEarly();

  // Re-exec ourselves if we need to set additional env or command line args.
  MaybeReexec();

  // Setup internal allocator callback.
  SetLowLevelAllocateMinAlignment(SHADOW_GRANULARITY);

  InitializeMemprofInterceptors();
  CheckASLR();

  ReplaceSystemMalloc();

  DisableCoreDumperIfNecessary();

  InitializeShadowMemory();

  TSDInit(PlatformTSDDtor);

  InitializeAllocator();

  // On Linux, MemprofThread::ThreadStart() calls malloc(), which is why
  // memprof_inited should be set to 1 prior to initializing the threads.
  memprof_inited = 1;
  memprof_init_is_running = false;

  if (flags()->atexit)
    Atexit(memprof_atexit);

  InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);

  // interceptors
  InitTlsSize();

  // Create main thread.
  MemprofThread *main_thread = CreateMainThread();
  CHECK_EQ(0, main_thread->tid());
  force_interface_symbols(); // no-op.
  SanitizerInitializeUnwinder();

  Symbolizer::LateInitialize();

  VReport(1, "MemProfiler Init done\n");

  memprof_init_done = 1;
}

void MemprofInitTime() {
  if (LIKELY(memprof_timestamp_inited))
    return;
  timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  memprof_init_timestamp_s = ts.tv_sec;
  memprof_timestamp_inited = 1;
}

// Initialize as requested from some part of MemProf runtime library
// (interceptors, allocator, etc).
void MemprofInitFromRtl() { MemprofInitInternal(); }

#if MEMPROF_DYNAMIC
// Initialize runtime in case it's LD_PRELOAD-ed into uninstrumented executable
// (and thus normal initializers from .preinit_array or modules haven't run).

class MemprofInitializer {
public:
  MemprofInitializer() { MemprofInitFromRtl(); }
};

static MemprofInitializer memprof_initializer;
#endif // MEMPROF_DYNAMIC

} // namespace __memprof

// ---------------------- Interface ---------------- {{{1
using namespace __memprof;

// Initialize as requested from instrumented application code.
void __memprof_init() {
  MemprofInitTime();
  MemprofInitInternal();
}

void __memprof_preinit() { MemprofInitInternal(); }

void __memprof_version_mismatch_check_v1() {}

void __memprof_record_access(void const volatile *addr) {
  __memprof::RecordAccess((uptr)addr);
}

void __memprof_record_access_range(void const volatile *addr, uptr size) {
  for (uptr a = (uptr)addr; a < (uptr)addr + size; a += kWordSize)
    __memprof::RecordAccess(a);
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE u16
__sanitizer_unaligned_load16(const uu16 *p) {
  __memprof_record_access(p);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE u32
__sanitizer_unaligned_load32(const uu32 *p) {
  __memprof_record_access(p);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE u64
__sanitizer_unaligned_load64(const uu64 *p) {
  __memprof_record_access(p);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_unaligned_store16(uu16 *p, u16 x) {
  __memprof_record_access(p);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_unaligned_store32(uu32 *p, u32 x) {
  __memprof_record_access(p);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_unaligned_store64(uu64 *p, u64 x) {
  __memprof_record_access(p);
  *p = x;
}
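
// Usage note (illustrative sketch, not part of the interface above): because
// __memprof_profile_filename is only defined weakly in this file, a strong
// definition linked into the instrumented binary takes precedence and selects
// the default profile output path. A minimal hand-written equivalent, assuming
// a hypothetical application translation unit and output path, would be:
//
//   // app_memprof_config.cpp (hypothetical)
//   extern "C" char __memprof_profile_filename[] = "/tmp/memprof.profraw";
//
// MemprofInitInternal() then passes this string to __sanitizer_set_report_path()
// unless common_flags()->log_path has been set at runtime.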