1 //===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 ///
10 /// Scudo Hardened Allocator implementation.
11 /// It uses the sanitizer_common allocator as a base and aims at mitigating
12 /// heap corruption vulnerabilities. It provides a checksum-guarded chunk
13 /// header, a delayed free list, and additional sanity checks.
14 ///
15 //===----------------------------------------------------------------------===//
16 
17 #include "scudo_allocator.h"
18 #include "scudo_crc32.h"
19 #include "scudo_utils.h"
20 
21 #include "sanitizer_common/sanitizer_allocator_interface.h"
22 #include "sanitizer_common/sanitizer_quarantine.h"
23 
#include <errno.h>
#include <limits.h>
#include <pthread.h>

#include <cstring>
28 
29 namespace __scudo {
30 
#if SANITIZER_CAN_USE_ALLOCATOR64
// 64-bit: use SizeClassAllocator64 with a dynamic base (kSpaceBeg == ~0ULL)
// and randomly shuffled chunks, for better ASLR-like properties.
const uptr AllocatorSpace = ~0ULL;
const uptr AllocatorSize = 0x40000000000ULL;
typedef DefaultSizeClassMap SizeClassMap;
struct AP {
  static const uptr kSpaceBeg = AllocatorSpace;
  static const uptr kSpaceSize = AllocatorSize;
  static const uptr kMetadataSize = 0;
  typedef __scudo::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
};
typedef SizeClassAllocator64<AP> PrimaryAllocator;
#else
// Currently, the 32-bit Sanitizer allocator has not yet benefited from all the
// security improvements brought to the 64-bit one. This makes the 32-bit
// version of Scudo slightly less toughened.
static const uptr RegionSizeLog = 20;
static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
# if SANITIZER_WORDSIZE == 32
typedef FlatByteMap<NumRegions> ByteMap;
# elif SANITIZER_WORDSIZE == 64
typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap;
# endif  // SANITIZER_WORDSIZE
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 0, SizeClassMap,
    RegionSizeLog, ByteMap> PrimaryAllocator;
#endif  // SANITIZER_CAN_USE_ALLOCATOR64

// The combined allocator: Primary for size-classed allocations served through
// a per-thread cache, Secondary (mmap-based) for large ones.
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef ScudoLargeMmapAllocator SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
  ScudoAllocator;
65 
static ScudoAllocator &getAllocator();

// Per-thread pseudo-random number generator, used for chunk header salts.
static thread_local Xorshift128Plus Prng;
// Global static cookie, initialized at start-up; mixed into every chunk
// header checksum so that headers can't be forged without knowing it.
static uptr Cookie;

// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };
75 
76 // Helper function that will compute the chunk checksum, being passed all the
77 // the needed information as uptrs. It will opt for the hardware version of
78 // the checksumming function if available.
79 INLINE u32 hashUptrs(uptr Pointer, uptr *Array, uptr ArraySize, u8 HashType) {
80   u32 Crc;
81   Crc = computeCRC32(Cookie, Pointer, HashType);
82   for (uptr i = 0; i < ArraySize; i++)
83     Crc = computeCRC32(Crc, Array[i], HashType);
84   return Crc;
85 }
86 
struct ScudoChunk : UnpackedHeader {
  // Returns the start of the backend allocation backing this chunk.
  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it wouldn't have been tampered. To
  // prevent this, we work with a local copy of the header.
  void *getAllocBeg(UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
  }

  // Returns the usable size for a chunk, meaning the amount of bytes from the
  // beginning of the user data to the end of the backend allocated chunk.
  uptr getUsableSize(UnpackedHeader *Header) {
    uptr Size = getAllocator().GetActuallyAllocatedSize(getAllocBeg(Header));
    if (Size == 0)
      return Size;
    return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
  }

  // Compute the checksum of the Chunk pointer and its ChunkHeader. The
  // Checksum field is zeroed out first so that the result is reproducible
  // at verification time.
  u16 computeChecksum(UnpackedHeader *Header) const {
    UnpackedHeader ZeroChecksumHeader = *Header;
    ZeroChecksumHeader.Checksum = 0;
    // Stage the header into an uptr array so it can be fed to the CRC32
    // helper word by word.
    uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
    memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
    u32 Hash = hashUptrs(reinterpret_cast<uptr>(this),
                         HeaderHolder,
                         ARRAY_SIZE(HeaderHolder),
                         atomic_load_relaxed(&HashAlgorithm));
    // Only the low 16 bits fit in the header's Checksum field.
    return static_cast<u16>(Hash);
  }

  // Checks the validity of a chunk by verifying its checksum. Returns false
  // (rather than dying) on mismatch, for use by ownership queries.
  bool isValid() {
    UnpackedHeader NewUnpackedHeader;
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader =
        AtomicHeader->load(std::memory_order_relaxed);
    NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    return (NewUnpackedHeader.Checksum == computeChecksum(&NewUnpackedHeader));
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  // Dies on checksum mismatch, which indicates header corruption.
  void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader =
        AtomicHeader->load(std::memory_order_relaxed);
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if (NewUnpackedHeader->Checksum != computeChecksum(NewUnpackedHeader)) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  void storeHeader(UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    AtomicHeader->store(NewPackedHeader, std::memory_order_relaxed);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader,
                             UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    if (!AtomicHeader->compare_exchange_strong(OldPackedHeader,
                                               NewPackedHeader,
                                               std::memory_order_relaxed,
                                               std::memory_order_relaxed)) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", this);
    }
  }
};
168 
// Set while initInternal() is running, to catch reentrant initialization.
static bool ScudoInitIsRunning = false;

// One-time global init control, and the TLS key used to hook thread teardown.
static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT;
static pthread_key_t PThreadKey;

// Per-thread state: init/teardown markers and the local allocator cache.
static thread_local bool ThreadInited = false;
static thread_local bool ThreadTornDown = false;
static thread_local AllocatorCache Cache;
177 
178 static void teardownThread(void *p) {
179   uptr v = reinterpret_cast<uptr>(p);
180   // The glibc POSIX thread-local-storage deallocation routine calls user
181   // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
182   // We want to be called last since other destructors might call free and the
183   // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
184   // quarantine and swallowing the cache.
185   if (v < PTHREAD_DESTRUCTOR_ITERATIONS) {
186     pthread_setspecific(PThreadKey, reinterpret_cast<void *>(v + 1));
187     return;
188   }
189   drainQuarantine();
190   getAllocator().DestroyCache(&Cache);
191   ThreadTornDown = true;
192 }
193 
// One-time global initialization: selects the checksum implementation, parses
// flags, and initializes the backend allocator. Reached via pthread_once from
// initThread().
static void initInternal() {
  SanitizerToolName = "Scudo";
  CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
  ScudoInitIsRunning = true;

  // Check if SSE4.2 is supported, if so, opt for the CRC32 hardware version.
  if (testCPUFeature(CRC32CPUFeature)) {
    atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);
  }

  initFlags();

  AllocatorOptions Options;
  Options.setFrom(getFlags(), common_flags());
  initAllocator(Options);

  MaybeStartBackgroudThread();

  ScudoInitIsRunning = false;
}
214 
// Registers the thread-teardown TLS key, then runs the global initialization.
static void initGlobal() {
  pthread_key_create(&PThreadKey, teardownThread);
  initInternal();
}

// Per-thread initialization: makes sure the global init ran, arms the TLS
// destructor (starting its iteration counter at 1), and sets up the local
// allocator cache.
static void NOINLINE initThread() {
  pthread_once(&GlobalInited, initGlobal);
  pthread_setspecific(PThreadKey, reinterpret_cast<void *>(1));
  getAllocator().InitCache(&Cache);
  ThreadInited = true;
}
226 
// Callbacks the quarantine uses to recycle chunks and to manage its own
// internal storage, all going through the provided allocator cache.
struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
    : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend.
  // Dies if the chunk is not in the quarantined state (header corruption or
  // logic error).
  void Recycle(ScudoChunk *Chunk) {
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (Header.State != ChunkQuarantine) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Chunk);
    }
    void *Ptr = Chunk->getAllocBeg(&Header);
    getAllocator().Deallocate(Cache_, Ptr);
  }

  /// Internal quarantine allocation and deallocation functions.
  void *Allocate(uptr Size) {
    // The internal quarantine memory cannot be protected by us. But the only
    // structures allocated are QuarantineBatch, that are 8KB for x64. So we
    // will use mmap for those, and given that Deallocate doesn't pass a size
    // in, we enforce the size of the allocation to be sizeof(QuarantineBatch).
    // TODO(kostyak): switching to mmap impacts greatly performances, we have
    //                to find another solution
    // CHECK_EQ(Size, sizeof(QuarantineBatch));
    // return MmapOrDie(Size, "QuarantineBatch");
    return getAllocator().Allocate(Cache_, Size, 1, false);
  }

  void Deallocate(void *Ptr) {
    // UnmapOrDie(Ptr, sizeof(QuarantineBatch));
    getAllocator().Deallocate(Cache_, Ptr);
  }

  AllocatorCache *Cache_;
};
263 
// Quarantine instantiation, and the per-thread quarantine cache.
typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
typedef ScudoQuarantine::Cache QuarantineCache;
static thread_local QuarantineCache ThreadQuarantineCache;
267 
268 void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
269   MayReturnNull = cf->allocator_may_return_null;
270   ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms;
271   QuarantineSizeMb = f->QuarantineSizeMb;
272   ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
273   DeallocationTypeMismatch = f->DeallocationTypeMismatch;
274   DeleteSizeMismatch = f->DeleteSizeMismatch;
275   ZeroContents = f->ZeroContents;
276 }
277 
278 void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
279   cf->allocator_may_return_null = MayReturnNull;
280   cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs;
281   f->QuarantineSizeMb = QuarantineSizeMb;
282   f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
283   f->DeallocationTypeMismatch = DeallocationTypeMismatch;
284   f->DeleteSizeMismatch = DeleteSizeMismatch;
285   f->ZeroContents = ZeroContents;
286 }
287 
struct Allocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  ScudoAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  // The fallback caches are used when the thread local caches have been
  // 'destroyed' on thread tear-down. They are protected by a Mutex as they can
  // be accessed by different threads.
  StaticSpinMutex FallbackMutex;
  AllocatorCache FallbackAllocatorCache;
  QuarantineCache FallbackQuarantineCache;

  // Cached option values, set once in init().
  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  explicit Allocator(LinkerInitialized)
    : AllocatorQuarantine(LINKER_INITIALIZED),
      FallbackQuarantineCache(LINKER_INITIALIZED) {}

  // One-time initialization: sanity-checks the header bitfield capacities,
  // then initializes the backend, the quarantine and the fallback cache, and
  // draws the global checksum cookie.
  void init(const AllocatorOptions &Options) {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be 0. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    UnpackedHeader Header = {};
    uptr MaxPrimaryAlignment = 1 << MostSignificantSetBitIndex(
        SizeClassMap::kMaxSize - MinAlignment);
    uptr MaxOffset = (MaxPrimaryAlignment - AlignedChunkHeaderSize) >>
        MinAlignmentLog;
    Header.Offset = MaxOffset;
    if (Header.Offset != MaxOffset) {
      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
                     "header\n");
    }
    // Verify that we can fit the maximum amount of unused bytes in the header.
    // Given that the Secondary fits the allocation to a page, the worst case
    // scenario happens in the Primary. It will depend on the second to last
    // and last class sizes, as well as the dynamic base for the Primary. The
    // following is an over-approximation that works for our needs.
    uptr MaxUnusedBytes = SizeClassMap::kMaxSize - 1 - AlignedChunkHeaderSize;
    Header.UnusedBytes = MaxUnusedBytes;
    if (Header.UnusedBytes != MaxUnusedBytes) {
      dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in "
                     "the header\n");
    }

    DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
    DeleteSizeMismatch = Options.DeleteSizeMismatch;
    ZeroContents = Options.ZeroContents;
    BackendAllocator.Init(Options.MayReturnNull, Options.ReleaseToOSIntervalMs);
    AllocatorQuarantine.Init(
        static_cast<uptr>(Options.QuarantineSizeMb) << 20,
        static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
    BackendAllocator.InitCache(&FallbackAllocatorCache);
    Cookie = Prng.Next();
  }

  // Helper function that checks for a valid Scudo chunk. Used by
  // __sanitizer_get_ownership; returns false instead of dying on a bad
  // checksum.
  bool isValidPointer(const void *UserPtr) {
    uptr ChunkBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(ChunkBeg, MinAlignment)) {
      return false;
    }
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
    return Chunk->isValid();
  }

  // Allocates a chunk. Returns the user pointer (header-offset past the
  // backend allocation, aligned on Alignment), or null/dies on bad requests
  // depending on the backend's may_return_null setting.
  void *allocate(uptr Size, uptr Alignment, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!IsPowerOfTwo(Alignment)) {
      dieWithMessage("ERROR: alignment is not a power of 2\n");
    }
    if (Alignment > MaxAlignment)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;
    if (Size == 0)
      Size = 1;
    if (Size >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();

    // Account for the header, and over-allocate when an alignment greater
    // than MinAlignment forces us to offset the user chunk.
    uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
    if (Alignment > MinAlignment)
      NeededSize += Alignment;
    if (NeededSize >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();

    // Primary backed and Secondary backed allocations have a different
    // treatment. We deal with alignment requirements of Primary serviced
    // allocations here, but the Secondary will take care of its own alignment
    // needs, which means we also have to work around some limitations of the
    // combined allocator to accommodate the situation.
    bool FromPrimary = PrimaryAllocator::CanAllocate(NeededSize, MinAlignment);

    void *Ptr;
    if (LIKELY(!ThreadTornDown)) {
      Ptr = BackendAllocator.Allocate(&Cache, NeededSize,
                                      FromPrimary ? MinAlignment : Alignment);
    } else {
      // The thread-local cache is gone: fall back to the shared, mutex
      // protected cache.
      SpinMutexLock l(&FallbackMutex);
      Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
                                      FromPrimary ? MinAlignment : Alignment);
    }
    if (!Ptr)
      return BackendAllocator.ReturnNullOrDieOnOOM();

    uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
    // If the allocation was serviced by the secondary, the returned pointer
    // accounts for ChunkHeaderSize to pass the alignment check of the combined
    // allocator. Adjust it here.
    if (!FromPrimary) {
      AllocBeg -= AlignedChunkHeaderSize;
      if (Alignment > MinAlignment)
        NeededSize -= Alignment;
    }

    uptr ActuallyAllocatedSize = BackendAllocator.GetActuallyAllocatedSize(
        reinterpret_cast<void *>(AllocBeg));
    // If requested, we will zero out the entire contents of the returned chunk.
    if (ZeroContents && FromPrimary)
       memset(Ptr, 0, ActuallyAllocatedSize);

    // Place the user chunk past the header, rounded up to the requested
    // alignment, and record the resulting offset in the header.
    uptr ChunkBeg = AllocBeg + AlignedChunkHeaderSize;
    if (!IsAligned(ChunkBeg, Alignment))
      ChunkBeg = RoundUpTo(ChunkBeg, Alignment);
    CHECK_LE(ChunkBeg + Size, AllocBeg + NeededSize);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
    UnpackedHeader Header = {};
    Header.State = ChunkAllocated;
    uptr Offset = ChunkBeg - AlignedChunkHeaderSize - AllocBeg;
    Header.Offset = Offset >> MinAlignmentLog;
    Header.AllocType = Type;
    Header.UnusedBytes = ActuallyAllocatedSize - Offset -
        AlignedChunkHeaderSize - Size;
    Header.Salt = static_cast<u8>(Prng.Next());
    Chunk->storeHeader(&Header);
    void *UserPtr = reinterpret_cast<void *>(ChunkBeg);
    // TODO(kostyak): hooks sound like a terrible idea security wise but might
    //                be needed for things to work properly?
    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
    return UserPtr;
  }

  // Deallocates a Chunk, which means adding it to the delayed free list (or
  // Quarantine).
  void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    // TODO(kostyak): see hook comment above
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
    if (!UserPtr)
      return;
    uptr ChunkBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(ChunkBeg, MinAlignment)) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", UserPtr);
    }
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (OldHeader.State != ChunkAllocated) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", UserPtr);
    }
    uptr UsableSize = Chunk->getUsableSize(&OldHeader);
    // Atomically transition the chunk to the quarantined state; a concurrent
    // free of the same chunk will fail the compare-exchange and die.
    UnpackedHeader NewHeader = OldHeader;
    NewHeader.State = ChunkQuarantine;
    Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
    // NOTE(review): the state is flipped before the type/size checks below;
    // on a mismatch we die anyway, so the early transition is not observable.
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (NewHeader.AllocType != Type) {
        // With the exception of memalign'd Chunks, that can be still be free'd.
        if (NewHeader.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch on address %p\n",
                         Chunk);
        }
      }
    }
    uptr Size = UsableSize - OldHeader.UnusedBytes;
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       Chunk);
      }
    }

    if (LIKELY(!ThreadTornDown)) {
      AllocatorQuarantine.Put(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache), Chunk, UsableSize);
    } else {
      SpinMutexLock l(&FallbackMutex);
      AllocatorQuarantine.Put(&FallbackQuarantineCache,
                              QuarantineCallback(&FallbackAllocatorCache),
                              Chunk, UsableSize);
    }
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    uptr ChunkBeg = reinterpret_cast<uptr>(OldPtr);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (OldHeader.State != ChunkAllocated) {
      dieWithMessage("ERROR: invalid chunk state when reallocating address "
                     "%p\n", OldPtr);
    }
    uptr Size = Chunk->getUsableSize(&OldHeader);
    // Only malloc'd chunks may be realloc'd (memalign'd ones have stricter
    // alignment guarantees we can't preserve here).
    if (OldHeader.AllocType != FromMalloc) {
      dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n",
                     Chunk);
    }
    UnpackedHeader NewHeader = OldHeader;
    // The new size still fits in the current chunk.
    if (NewSize <= Size) {
      NewHeader.UnusedBytes = Size - NewSize;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = Size - OldHeader.UnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
      // Quarantine the old chunk once the contents have been copied out.
      NewHeader.State = ChunkQuarantine;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      if (LIKELY(!ThreadTornDown)) {
        AllocatorQuarantine.Put(&ThreadQuarantineCache,
                                QuarantineCallback(&Cache), Chunk, Size);
      } else {
        SpinMutexLock l(&FallbackMutex);
        AllocatorQuarantine.Put(&FallbackQuarantineCache,
                                QuarantineCallback(&FallbackAllocatorCache),
                                Chunk, Size);
      }
    }
    return NewPtr;
  }

  // Helper function that returns the actual usable size of a chunk. Returns 0
  // for a null pointer, dies if the chunk isn't in the allocated state.
  uptr getUsableSize(const void *Ptr) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!Ptr)
      return 0;
    uptr ChunkBeg = reinterpret_cast<uptr>(Ptr);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (Header.State != ChunkAllocated) {
      dieWithMessage("ERROR: invalid chunk state when sizing address %p\n",
                     Ptr);
    }
    return Chunk->getUsableSize(&Header);
  }

  // calloc: overflow-checked NMemB * Size allocation, zeroed out.
  void *calloc(uptr NMemB, uptr Size) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    uptr Total = NMemB * Size;
    if (Size != 0 && Total / Size != NMemB) // Overflow check
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    void *Ptr = allocate(Total, MinAlignment, FromMalloc);
    // If ZeroContents, the content of the chunk has already been zero'd out.
    // NOTE(review): Secondary-backed chunks are presumably zeroed by the
    // backing mmap, hence the FromPrimary condition — confirm against the
    // Secondary's implementation.
    if (!ZeroContents && Ptr && BackendAllocator.FromPrimary(Ptr))
      memset(Ptr, 0, getUsableSize(Ptr));
    return Ptr;
  }

  // Flushes this thread's quarantine cache into the global quarantine.
  void drainQuarantine() {
    AllocatorQuarantine.Drain(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache));
  }
};
581 
// The unique global allocator instance, linker-initialized so it is usable
// before static constructors run.
static Allocator Instance(LINKER_INITIALIZED);

// Accessor for the combined backend allocator of the global instance.
static ScudoAllocator &getAllocator() {
  return Instance.BackendAllocator;
}

void initAllocator(const AllocatorOptions &Options) {
  Instance.init(Options);
}

void drainQuarantine() {
  Instance.drainQuarantine();
}
595 
// malloc-family entry point.
void *scudoMalloc(uptr Size, AllocType Type) {
  return Instance.allocate(Size, MinAlignment, Type);
}

// free entry point. A DeleteSize of 0 means "unknown", which skips the
// sized-delete check.
void scudoFree(void *Ptr, AllocType Type) {
  Instance.deallocate(Ptr, 0, Type);
}

// Sized-delete entry point: Size is checked against the chunk's recorded
// size when DeleteSizeMismatch is enabled.
void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
  Instance.deallocate(Ptr, Size, Type);
}
607 
608 void *scudoRealloc(void *Ptr, uptr Size) {
609   if (!Ptr)
610     return Instance.allocate(Size, MinAlignment, FromMalloc);
611   if (Size == 0) {
612     Instance.deallocate(Ptr, 0, FromMalloc);
613     return nullptr;
614   }
615   return Instance.reallocate(Ptr, Size);
616 }
617 
// calloc entry point; overflow checking and zeroing happen in the instance.
void *scudoCalloc(uptr NMemB, uptr Size) {
  return Instance.calloc(NMemB, Size);
}

// valloc: page-aligned allocation.
void *scudoValloc(uptr Size) {
  return Instance.allocate(Size, GetPageSizeCached(), FromMemalign);
}

// memalign entry point.
void *scudoMemalign(uptr Alignment, uptr Size) {
  return Instance.allocate(Size, Alignment, FromMemalign);
}
629 
630 void *scudoPvalloc(uptr Size) {
631   uptr PageSize = GetPageSizeCached();
632   Size = RoundUpTo(Size, PageSize);
633   if (Size == 0) {
634     // pvalloc(0) should allocate one page.
635     Size = PageSize;
636   }
637   return Instance.allocate(Size, PageSize, FromMemalign);
638 }
639 
640 int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
641   *MemPtr = Instance.allocate(Size, Alignment, FromMemalign);
642   return 0;
643 }
644 
// aligned_alloc entry point (C11).
void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  // size must be a multiple of the alignment. To avoid a division, we first
  // make sure that alignment is a power of 2.
  // NOTE(review): C11 leaves a non-multiple Size undefined; this deliberately
  // dies via CHECK rather than returning nullptr.
  CHECK(IsPowerOfTwo(Alignment));
  CHECK_EQ((Size & (Alignment - 1)), 0);
  return Instance.allocate(Size, Alignment, FromMalloc);
}
652 
// malloc_usable_size entry point: returns the usable size of an allocated
// chunk (0 for nullptr).
uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}
656 
657 }  // namespace __scudo
658 
659 using namespace __scudo;
660 
661 // MallocExtension helper functions
662 
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  getAllocator().GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  getAllocator().GetStats(stats);
  return stats[AllocatorStatMapped];
}

// Not implemented: returns a non-zero dummy value.
uptr __sanitizer_get_free_bytes() {
  return 1;
}

// Not implemented: returns a non-zero dummy value.
uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

// The estimate is simply the requested size (headers/rounding not accounted
// for).
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

// Returns whether Ptr looks like a valid Scudo chunk (alignment + checksum).
int __sanitizer_get_ownership(const void *Ptr) {
  return Instance.isValidPointer(Ptr);
}

uptr __sanitizer_get_allocated_size(const void *Ptr) {
  return Instance.getUsableSize(Ptr);
}
694