//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_crc32.h"
#include "scudo_errors.h"
#include "scudo_flags.h"
#include "scudo_interface_internal.h"
#include "scudo_tsd.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#ifdef GWP_ASAN_HOOKS
# include "gwp_asan/guarded_pool_allocator.h"
# include "gwp_asan/optional/backtrace.h"
# include "gwp_asan/optional/options_parser.h"
# include "gwp_asan/optional/segv_handler.h"
#endif // GWP_ASAN_HOOKS

#include <errno.h>
#include <string.h>

namespace __scudo {

// Global static cookie, initialized at start-up.
static u32 Cookie;

// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };

inline u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
  // If the hardware CRC32 feature is defined here, it was enabled everywhere,
  // as opposed to only for scudo_crc32.cpp. This means that other hardware
  // specific instructions were likely emitted at other places, and as a
  // result there is no reason to not use it here.
#if defined(__CRC32__) || defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
  Crc = CRC32_INTRINSIC(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = CRC32_INTRINSIC(Crc, Array[i]);
  return Crc;
#else
  if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
    Crc = computeHardwareCRC32(Crc, Value);
    for (uptr i = 0; i < ArraySize; i++)
      Crc = computeHardwareCRC32(Crc, Array[i]);
    return Crc;
  }
  Crc = computeSoftwareCRC32(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = computeSoftwareCRC32(Crc, Array[i]);
  return Crc;
#endif // defined(__CRC32__) || defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
}

static BackendT &getBackend();

namespace Chunk {
  static inline AtomicPackedHeader *getAtomicHeader(void *Ptr) {
    return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
        getHeaderSize());
  }
  static inline
  const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
    return reinterpret_cast<const AtomicPackedHeader *>(
        reinterpret_cast<uptr>(Ptr) - getHeaderSize());
  }

  static inline bool isAligned(const void *Ptr) {
    return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment);
  }

  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it hasn't been tampered with. To
  // prevent this, we work with a local copy of the header.
  static inline void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
    return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
        getHeaderSize() - (Header->Offset << MinAlignmentLog));
  }

  // Returns the usable size for a chunk, meaning the number of bytes from the
  // beginning of the user data to the end of the backend allocated chunk.
  static inline uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
    const uptr ClassId = Header->ClassId;
    if (ClassId)
      return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() -
          (Header->Offset << MinAlignmentLog);
    return SecondaryT::GetActuallyAllocatedSize(
        getBackendPtr(Ptr, Header)) - getHeaderSize();
  }

  // Returns the size the user requested when allocating the chunk.
  static inline uptr getSize(const void *Ptr, UnpackedHeader *Header) {
    const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
    if (Header->ClassId)
      return SizeOrUnusedBytes;
    return SecondaryT::GetActuallyAllocatedSize(
        getBackendPtr(Ptr, Header)) - getHeaderSize() - SizeOrUnusedBytes;
  }

  // Compute the checksum of the chunk pointer and its header.
  static inline u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) {
    UnpackedHeader ZeroChecksumHeader = *Header;
    ZeroChecksumHeader.Checksum = 0;
    uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
    memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
    const u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(Ptr),
                                 HeaderHolder, ARRAY_SIZE(HeaderHolder));
    return static_cast<u16>(Crc);
  }

  // Checks the validity of a chunk by verifying its checksum. It does not
  // terminate the process in the event of an invalid chunk.
  static inline bool isValid(const void *Ptr) {
    PackedHeader NewPackedHeader =
        atomic_load_relaxed(getConstAtomicHeader(Ptr));
    UnpackedHeader NewUnpackedHeader =
        bit_cast<UnpackedHeader>(NewPackedHeader);
    return (NewUnpackedHeader.Checksum ==
            computeChecksum(Ptr, &NewUnpackedHeader));
  }

  // Ensure that ChunkAvailable is 0, so that if a zero checksum ever happens
  // to be valid for a fully zeroed-out header, the chunk still reads as
  // available.
  COMPILER_CHECK(ChunkAvailable == 0);

  // Loads and unpacks the header, verifying the checksum in the process.
  static inline
  void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
    PackedHeader NewPackedHeader =
        atomic_load_relaxed(getConstAtomicHeader(Ptr));
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if (UNLIKELY(NewUnpackedHeader->Checksum !=
                 computeChecksum(Ptr, NewUnpackedHeader)))
      dieWithMessage("corrupted chunk header at address %p\n", Ptr);
  }

  // Packs and stores the header, computing the checksum in the process.
  static inline void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  static inline void compareExchangeHeader(void *Ptr,
                                           UnpackedHeader *NewUnpackedHeader,
                                           UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    if (UNLIKELY(!atomic_compare_exchange_strong(
            getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
            memory_order_relaxed)))
      dieWithMessage("race on chunk header at address %p\n", Ptr);
  }
} // namespace Chunk

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCacheT *Cache)
      : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend,
  // first making sure it hasn't been tampered with.
  void Recycle(void *Ptr) {
    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    if (UNLIKELY(Header.State != ChunkQuarantine))
      dieWithMessage("invalid chunk state when recycling address %p\n", Ptr);
    UnpackedHeader NewHeader = Header;
    NewHeader.State = ChunkAvailable;
    Chunk::compareExchangeHeader(Ptr, &NewHeader, &Header);
    void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header);
    if (Header.ClassId)
      getBackend().deallocatePrimary(Cache_, BackendPtr, Header.ClassId);
    else
      getBackend().deallocateSecondary(BackendPtr);
  }

  // Internal quarantine allocation and deallocation functions. We first check
  // that the batches are indeed serviced by the Primary.
  // TODO(kostyak): figure out the best way to protect the batches.
  void *Allocate(uptr Size) {
    const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
    return getBackend().allocatePrimary(Cache_, BatchClassId);
  }

  void Deallocate(void *Ptr) {
    const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
    getBackend().deallocatePrimary(Cache_, Ptr, BatchClassId);
  }

  AllocatorCacheT *Cache_;
  COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
};

typedef Quarantine<QuarantineCallback, void> QuarantineT;
typedef QuarantineT::Cache QuarantineCacheT;
COMPILER_CHECK(sizeof(QuarantineCacheT) <=
               sizeof(ScudoTSD::QuarantineCachePlaceHolder));

QuarantineCacheT *getQuarantineCache(ScudoTSD *TSD) {
  return reinterpret_cast<QuarantineCacheT *>(TSD->QuarantineCachePlaceHolder);
}

#ifdef GWP_ASAN_HOOKS
static gwp_asan::GuardedPoolAllocator GuardedAlloc;
#endif // GWP_ASAN_HOOKS

struct Allocator {
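  // Hard cap on a single allocation request: 2 GB on 32-bit platforms,
  // 1 TB on 64-bit platforms.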
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  BackendT Backend;
  QuarantineT Quarantine;

  u32 QuarantineChunksUpToSize;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  bool CheckRssLimit;
  uptr HardRssLimitMb;
  uptr SoftRssLimitMb;
  atomic_uint8_t RssLimitExceeded;
  atomic_uint64_t RssLastCheckedAtNS;

  explicit Allocator(LinkerInitialized)
      : Quarantine(LINKER_INITIALIZED) {}

  NOINLINE void performSanityChecks();

  void init() {
    SanitizerToolName = "Scudo";
    PrimaryAllocatorName = "ScudoPrimary";
    SecondaryAllocatorName = "ScudoSecondary";

    initFlags();

    performSanityChecks();

    // Check if hardware CRC32 is supported in the binary and by the platform,
    // if so, opt for the CRC32 hardware version of the checksum.
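    // Taking the address of computeHardwareCRC32 checks that the hardware
    // implementation is actually present in the binary before selecting it.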
    if (&computeHardwareCRC32 && hasHardwareCRC32())
      atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);

    SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
    Backend.init(common_flags()->allocator_release_to_os_interval_ms);
    HardRssLimitMb = common_flags()->hard_rss_limit_mb;
    SoftRssLimitMb = common_flags()->soft_rss_limit_mb;
    Quarantine.Init(
        static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10,
        static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10);
    QuarantineChunksUpToSize = (Quarantine.GetCacheSize() == 0) ? 0 :
        getFlags()->QuarantineChunksUpToSize;
    DeallocationTypeMismatch = getFlags()->DeallocationTypeMismatch;
    DeleteSizeMismatch = getFlags()->DeleteSizeMismatch;
    ZeroContents = getFlags()->ZeroContents;

    if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&Cookie), sizeof(Cookie),
                            /*blocking=*/false))) {
      Cookie = static_cast<u32>((NanoTime() >> 12) ^
                                (reinterpret_cast<uptr>(this) >> 4));
    }

    CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
    if (CheckRssLimit)
      atomic_store_relaxed(&RssLastCheckedAtNS, MonotonicNanoTime());
  }

  // Helper function that checks for a valid Scudo chunk. nullptr is not a
  // valid chunk.
  bool isValidPointer(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return false;
    if (!Chunk::isAligned(Ptr))
      return false;
    return Chunk::isValid(Ptr);
  }

  NOINLINE bool isRssLimitExceeded();

  // Allocates a chunk.
  void *
  allocate(uptr Size, uptr Alignment, AllocType Type,
           bool ForceZeroContents = false) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    initThreadMaybe();

    if (UNLIKELY(Alignment > MaxAlignment)) {
      if (AllocatorMayReturnNull())
        return nullptr;
      reportAllocationAlignmentTooBig(Alignment, MaxAlignment);
    }
    if (UNLIKELY(Alignment < MinAlignment))
      Alignment = MinAlignment;

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.shouldSample())) {
      if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
        if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
          __sanitizer_malloc_hook(Ptr, Size);
        return Ptr;
      }
    }
#endif // GWP_ASAN_HOOKS

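    // The required backend size is the user size rounded up to a multiple of
    // MinAlignment, plus the header. For larger alignments, extra slack is
    // reserved so that the user pointer can be bumped to an aligned address.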
    const uptr NeededSize = RoundUpTo(Size ? Size : 1, MinAlignment) +
        Chunk::getHeaderSize();
    const uptr AlignedSize = (Alignment > MinAlignment) ?
        NeededSize + (Alignment - Chunk::getHeaderSize()) : NeededSize;
    if (UNLIKELY(Size >= MaxAllowedMallocSize) ||
        UNLIKELY(AlignedSize >= MaxAllowedMallocSize)) {
      if (AllocatorMayReturnNull())
        return nullptr;
      reportAllocationSizeTooBig(Size, AlignedSize, MaxAllowedMallocSize);
    }

    if (CheckRssLimit && UNLIKELY(isRssLimitExceeded())) {
      if (AllocatorMayReturnNull())
        return nullptr;
      reportRssLimitExceeded();
    }

    // Primary and Secondary backed allocations have a different treatment. We
    // deal with alignment requirements of Primary serviced allocations here,
    // but the Secondary will take care of its own alignment needs.
    void *BackendPtr;
    uptr BackendSize;
    u8 ClassId;
    if (PrimaryT::CanAllocate(AlignedSize, MinAlignment)) {
      BackendSize = AlignedSize;
      ClassId = SizeClassMap::ClassID(BackendSize);
      bool UnlockRequired;
      ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
      BackendPtr = Backend.allocatePrimary(&TSD->Cache, ClassId);
      if (UnlockRequired)
        TSD->unlock();
    } else {
      BackendSize = NeededSize;
      ClassId = 0;
      BackendPtr = Backend.allocateSecondary(BackendSize, Alignment);
    }
    if (UNLIKELY(!BackendPtr)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      reportOutOfMemory(Size);
    }

    // If requested, we will zero out the entire contents of the returned
    // chunk.
    if ((ForceZeroContents || ZeroContents) && ClassId)
      memset(BackendPtr, 0, PrimaryT::ClassIdToSize(ClassId));

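    // The user pointer initially sits right past the header; for Primary
    // backed chunks it may still need to be bumped up to the requested
    // alignment, which is what the Offset header field records.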
    UnpackedHeader Header = {};
    uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + Chunk::getHeaderSize();
    if (UNLIKELY(!IsAligned(UserPtr, Alignment))) {
      // Since the Secondary takes care of alignment, a non-aligned pointer
      // means it is from the Primary. It is also the only case where the
      // offset field of the header would be non-zero.
      DCHECK(ClassId);
      const uptr AlignedUserPtr = RoundUpTo(UserPtr, Alignment);
      Header.Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog;
      UserPtr = AlignedUserPtr;
    }
    DCHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize);
    Header.State = ChunkAllocated;
    Header.AllocType = Type;
    if (ClassId) {
      Header.ClassId = ClassId;
      Header.SizeOrUnusedBytes = Size;
    } else {
      // The secondary fits the allocations to a page, so the amount of unused
      // bytes is the difference between the end of the user allocation and
      // the next page boundary.
      const uptr PageSize = GetPageSizeCached();
      const uptr TrailingBytes = (UserPtr + Size) & (PageSize - 1);
      if (TrailingBytes)
        Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
    }
    void *Ptr = reinterpret_cast<void *>(UserPtr);
    Chunk::storeHeader(Ptr, &Header);
    if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
      __sanitizer_malloc_hook(Ptr, Size);
    return Ptr;
  }

  // Places a chunk in the quarantine, or deallocates it directly if the
  // quarantine is zero-sized or the chunk is larger than the quarantine
  // chunk size threshold.
  void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header, uptr Size)
      SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize);
    if (BypassQuarantine) {
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkAvailable;
      Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
      void *BackendPtr = Chunk::getBackendPtr(Ptr, Header);
      if (Header->ClassId) {
        bool UnlockRequired;
        ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
        getBackend().deallocatePrimary(&TSD->Cache, BackendPtr,
                                       Header->ClassId);
        if (UnlockRequired)
          TSD->unlock();
      } else {
        getBackend().deallocateSecondary(BackendPtr);
      }
    } else {
      // If a small memory amount was allocated with a larger alignment, we
      // want to take that into account. Otherwise the Quarantine would be
      // filled with tiny chunks, taking a lot of VA memory. This is an
      // approximation of the usable size that spares us a call to
      // GetActuallyAllocatedSize.
      const uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog);
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkQuarantine;
      Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
      bool UnlockRequired;
      ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
      Quarantine.Put(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache),
                     Ptr, EstimatedSize);
      if (UnlockRequired)
        TSD->unlock();
    }
  }

  // Deallocates a Chunk, which means either adding it to the quarantine or
  // directly returning it to the backend if criteria are met.
  void deallocate(void *Ptr, uptr DeleteSize, uptr DeleteAlignment,
                  AllocType Type) {
    // For a deallocation, we only ensure minimal initialization, meaning
    // thread local data will be left uninitialized for now (when using ELF
    // TLS). The fallback cache will be used instead. This is a workaround for
    // a situation where the only heap operation performed in a thread would
    // be a free past the TLS destructors, which would end up with initialized
    // thread specific data never being destroyed properly. Any other heap
    // operation will do a full init.
    initThreadMaybe(/*MinimalInit=*/true);
    if (SCUDO_CAN_USE_HOOKS && &__sanitizer_free_hook)
      __sanitizer_free_hook(Ptr);
    if (UNLIKELY(!Ptr))
      return;

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
      GuardedAlloc.deallocate(Ptr);
      return;
    }
#endif // GWP_ASAN_HOOKS

    if (UNLIKELY(!Chunk::isAligned(Ptr)))
      dieWithMessage("misaligned pointer when deallocating address %p\n", Ptr);
    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    if (UNLIKELY(Header.State != ChunkAllocated))
      dieWithMessage("invalid chunk state when deallocating address %p\n", Ptr);
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (Header.AllocType != Type) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (Header.AllocType != FromMemalign || Type != FromMalloc)
          dieWithMessage("allocation type mismatch when deallocating address "
                         "%p\n", Ptr);
      }
    }
    const uptr Size = Chunk::getSize(Ptr, &Header);
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size)
        dieWithMessage("invalid sized delete when deallocating address %p\n",
                       Ptr);
    }
    (void)DeleteAlignment;  // TODO(kostyak): verify that the alignment matches.
    quarantineOrDeallocateChunk(Ptr, &Header, Size);
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    initThreadMaybe();

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
      size_t OldSize = GuardedAlloc.getSize(OldPtr);
      void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
      if (NewPtr)
        memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
      GuardedAlloc.deallocate(OldPtr);
      return NewPtr;
    }
#endif // GWP_ASAN_HOOKS

    if (UNLIKELY(!Chunk::isAligned(OldPtr)))
      dieWithMessage("misaligned address when reallocating address %p\n",
                     OldPtr);
    UnpackedHeader OldHeader;
    Chunk::loadHeader(OldPtr, &OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated))
      dieWithMessage("invalid chunk state when reallocating address %p\n",
                     OldPtr);
    if (DeallocationTypeMismatch) {
      if (UNLIKELY(OldHeader.AllocType != FromMalloc))
        dieWithMessage("allocation type mismatch when reallocating address "
                       "%p\n", OldPtr);
    }
    const uptr UsableSize = Chunk::getUsableSize(OldPtr, &OldHeader);
    // The new size still fits in the current chunk, and the size difference
    // is reasonable.
    if (NewSize <= UsableSize &&
        (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
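      // Update the size recorded in the header: the requested size for
      // Primary backed chunks, the amount of unused trailing bytes for
      // Secondary backed ones.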
      UnpackedHeader NewHeader = OldHeader;
      NewHeader.SizeOrUnusedBytes =
          OldHeader.ClassId ? NewSize : UsableSize - NewSize;
      Chunk::compareExchangeHeader(OldPtr, &NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      const uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes :
          UsableSize - OldHeader.SizeOrUnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize));
      quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
    }
    return NewPtr;
  }

  // Helper function that returns the actual usable size of a chunk.
  uptr getUsableSize(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return 0;

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
      return GuardedAlloc.getSize(Ptr);
#endif // GWP_ASAN_HOOKS

    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != ChunkAllocated))
      dieWithMessage("invalid chunk state when sizing address %p\n", Ptr);
    return Chunk::getUsableSize(Ptr, &Header);
  }

  void *calloc(uptr NMemB, uptr Size) {
    initThreadMaybe();
    if (UNLIKELY(CheckForCallocOverflow(NMemB, Size))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      reportCallocOverflow(NMemB, Size);
    }
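    // The multiplication cannot overflow at this point; the last argument
    // forces the returned chunk to be zeroed even if ZeroContents is off.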
    return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
  }

  void commitBack(ScudoTSD *TSD) {
    Quarantine.Drain(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache));
    Backend.destroyCache(&TSD->Cache);
  }

  uptr getStats(AllocatorStat StatType) {
    initThreadMaybe();
    uptr stats[AllocatorStatCount];
    Backend.getStats(stats);
    return stats[StatType];
  }

  bool canReturnNull() {
    initThreadMaybe();
    return AllocatorMayReturnNull();
  }

  void setRssLimit(uptr LimitMb, bool HardLimit) {
    if (HardLimit)
      HardRssLimitMb = LimitMb;
    else
      SoftRssLimitMb = LimitMb;
    CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
  }

  void printStats() {
    initThreadMaybe();
    Backend.printStats();
  }
};

NOINLINE void Allocator::performSanityChecks() {
  // Verify that the header offset field can hold the maximum offset. In the
  // case of the Secondary allocator, it takes care of alignment and the
  // offset will always be 0. In the case of the Primary, the worst case
  // scenario happens in the last size class, when the backend allocation
  // would already be aligned on the requested alignment, which would happen
  // to be the maximum alignment that would fit in that size class. As a
  // result, the maximum offset will be at most the maximum alignment for the
  // last size class minus the header size, in multiples of MinAlignment.
  UnpackedHeader Header = {};
  const uptr MaxPrimaryAlignment =
      1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
  const uptr MaxOffset =
      (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
  Header.Offset = MaxOffset;
  if (Header.Offset != MaxOffset)
    dieWithMessage("maximum possible offset doesn't fit in header\n");
  // Verify that we can fit the maximum size or amount of unused bytes in the
  // header. Given that the Secondary fits the allocation to a page, the worst
  // case scenario happens in the Primary. It will depend on the second to
  // last and last class sizes, as well as the dynamic base for the Primary.
  // The following is an over-approximation that works for our needs.
  const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
  Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
  if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes)
    dieWithMessage("maximum possible unused bytes doesn't fit in header\n");

  const uptr LargestClassId = SizeClassMap::kLargestClassID;
  Header.ClassId = LargestClassId;
  if (Header.ClassId != LargestClassId)
    dieWithMessage("largest class ID doesn't fit in header\n");
}

// Opportunistic RSS limit check. This will update the RSS limit status, if
// it can, every 250ms, otherwise it will just return the current one.
NOINLINE bool Allocator::isRssLimitExceeded() {
  u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
  const u64 CurrentCheck = MonotonicNanoTime();
  if (LIKELY(CurrentCheck < LastCheck + (250ULL * 1000000ULL)))
    return atomic_load_relaxed(&RssLimitExceeded);
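  // Only one thread wins the compare-exchange and goes on to read the RSS;
  // the others return the last known status.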
  if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck,
                                    CurrentCheck, memory_order_relaxed))
    return atomic_load_relaxed(&RssLimitExceeded);
  // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the
  //                RSS from /proc/self/statm by default. We might want to
  //                call getrusage directly, even if it's less accurate.
  const uptr CurrentRssMb = GetRSS() >> 20;
  if (HardRssLimitMb && UNLIKELY(HardRssLimitMb < CurrentRssMb))
    dieWithMessage("hard RSS limit exhausted (%zdMb vs %zdMb)\n",
                   HardRssLimitMb, CurrentRssMb);
  if (SoftRssLimitMb) {
    if (atomic_load_relaxed(&RssLimitExceeded)) {
      if (CurrentRssMb <= SoftRssLimitMb)
        atomic_store_relaxed(&RssLimitExceeded, false);
    } else {
      if (CurrentRssMb > SoftRssLimitMb) {
        atomic_store_relaxed(&RssLimitExceeded, true);
        Printf("Scudo INFO: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
               SoftRssLimitMb, CurrentRssMb);
      }
    }
  }
  return atomic_load_relaxed(&RssLimitExceeded);
}

static Allocator Instance(LINKER_INITIALIZED);

static BackendT &getBackend() {
  return Instance.Backend;
}

void initScudo() {
  Instance.init();
#ifdef GWP_ASAN_HOOKS
  gwp_asan::options::initOptions(__sanitizer::GetEnv("GWP_ASAN_OPTIONS"),
                                 Printf);
  gwp_asan::options::Options &Opts = gwp_asan::options::getOptions();
  Opts.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
  GuardedAlloc.init(Opts);

  if (Opts.InstallSignalHandlers)
    gwp_asan::segv_handler::installSignalHandlers(
        &GuardedAlloc, __sanitizer::Printf,
        gwp_asan::backtrace::getPrintBacktraceFunction(),
        gwp_asan::backtrace::getSegvBacktraceFunction());
#endif // GWP_ASAN_HOOKS
}

void ScudoTSD::init() {
  getBackend().initCache(&Cache);
  memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
}

void ScudoTSD::commitBack() {
  Instance.commitBack(this);
}

void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type) {
  if (Alignment && UNLIKELY(!IsPowerOfTwo(Alignment))) {
    errno = EINVAL;
    if (Instance.canReturnNull())
      return nullptr;
    reportAllocationAlignmentNotPowerOfTwo(Alignment);
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, Type));
}

void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type) {
  Instance.deallocate(Ptr, Size, Alignment, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
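  // realloc with a size of 0 frees the chunk and returns nullptr.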
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, 0, FromMalloc);
    return nullptr;
  }
  return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return SetErrnoOnNull(Instance.calloc(NMemB, Size));
}

void *scudoValloc(uptr Size) {
  return SetErrnoOnNull(
      Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
}

void *scudoPvalloc(uptr Size) {
  const uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) {
    errno = ENOMEM;
    if (Instance.canReturnNull())
      return nullptr;
    reportPvallocOverflow(Size);
  }
  // pvalloc(0) should allocate one page.
  Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
  return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
    if (!Instance.canReturnNull())
      reportInvalidPosixMemalignAlignment(Alignment);
    return EINVAL;
  }
  void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
  if (UNLIKELY(!Ptr))
    return ENOMEM;
  *MemPtr = Ptr;
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
    errno = EINVAL;
    if (Instance.canReturnNull())
      return nullptr;
    reportInvalidAlignedAllocAlignment(Size, Alignment);
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

} // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  return Instance.getStats(AllocatorStatAllocated);
}

uptr __sanitizer_get_heap_size() {
  return Instance.getStats(AllocatorStatMapped);
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr Size) {
  return Size;
}

int __sanitizer_get_ownership(const void *Ptr) {
  return Instance.isValidPointer(Ptr);
}

uptr __sanitizer_get_allocated_size(const void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
                             void *Ptr, uptr Size) {
  (void)Ptr;
  (void)Size;
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *Ptr) {
  (void)Ptr;
}
#endif

// Interface functions

void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit) {
  if (!SCUDO_CAN_USE_PUBLIC_INTERFACE)
    return;
  Instance.setRssLimit(LimitMb, !!HardLimit);
}

void __scudo_print_stats() {
  Instance.printStats();
}