//===- MappedBlockStream.cpp - Reads stream data from an MSF file ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/DebugInfo/MSF/MappedBlockStream.h"

#include "llvm/DebugInfo/MSF/IMSFFile.h"
#include "llvm/DebugInfo/MSF/MSFCommon.h"
#include "llvm/DebugInfo/MSF/MSFStreamLayout.h"
#include "llvm/Support/BinaryStreamError.h"

using namespace llvm;
using namespace llvm::msf;

namespace {
template <typename Base> class MappedBlockStreamImpl : public Base {
public:
  template <typename... Args>
  MappedBlockStreamImpl(Args &&... Params)
      : Base(std::forward<Args>(Params)...) {}
};
}

static void initializeFpmStreamLayout(const MSFLayout &Layout,
                                      MSFStreamLayout &FpmLayout) {
  uint32_t NumFpmIntervals = msf::getNumFpmIntervals(Layout);
  support::ulittle32_t FpmBlock = Layout.SB->FreeBlockMapBlock;
  assert(FpmBlock == 1 || FpmBlock == 2);
  while (NumFpmIntervals > 0) {
    FpmLayout.Blocks.push_back(FpmBlock);
    FpmBlock += msf::getFpmIntervalLength(Layout);
    --NumFpmIntervals;
  }
  FpmLayout.Length = msf::getFullFpmByteSize(Layout);
}

typedef std::pair<uint32_t, uint32_t> Interval;
static Interval intersect(const Interval &I1, const Interval &I2) {
  return std::make_pair(std::max(I1.first, I2.first),
                        std::min(I1.second, I2.second));
}

MappedBlockStream::MappedBlockStream(uint32_t BlockSize,
                                     const MSFStreamLayout &Layout,
                                     BinaryStreamRef MsfData,
                                     BumpPtrAllocator &Allocator)
    : BlockSize(BlockSize), StreamLayout(Layout), MsfData(MsfData),
      Allocator(Allocator) {}

std::unique_ptr<MappedBlockStream> MappedBlockStream::createStream(
    uint32_t BlockSize, const MSFStreamLayout &Layout, BinaryStreamRef MsfData,
    BumpPtrAllocator &Allocator) {
  return llvm::make_unique<MappedBlockStreamImpl<MappedBlockStream>>(
      BlockSize, Layout, MsfData, Allocator);
}

std::unique_ptr<MappedBlockStream> MappedBlockStream::createIndexedStream(
    const MSFLayout &Layout, BinaryStreamRef MsfData, uint32_t StreamIndex,
    BumpPtrAllocator &Allocator) {
  assert(StreamIndex < Layout.StreamMap.size() && "Invalid stream index");
  MSFStreamLayout SL;
  SL.Blocks = Layout.StreamMap[StreamIndex];
  SL.Length = Layout.StreamSizes[StreamIndex];
  return llvm::make_unique<MappedBlockStreamImpl<MappedBlockStream>>(
      Layout.SB->BlockSize, SL, MsfData, Allocator);
}

std::unique_ptr<MappedBlockStream>
MappedBlockStream::createDirectoryStream(const MSFLayout &Layout,
                                         BinaryStreamRef MsfData,
                                         BumpPtrAllocator &Allocator) {
  MSFStreamLayout SL;
  SL.Blocks = Layout.DirectoryBlocks;
  SL.Length = Layout.SB->NumDirectoryBytes;
  return createStream(Layout.SB->BlockSize, SL, MsfData, Allocator);
}

std::unique_ptr<MappedBlockStream>
MappedBlockStream::createFpmStream(const MSFLayout &Layout,
                                   BinaryStreamRef MsfData,
                                   BumpPtrAllocator &Allocator) {
  MSFStreamLayout SL;
  initializeFpmStreamLayout(Layout, SL);
  return createStream(Layout.SB->BlockSize, SL, MsfData, Allocator);
}

Error MappedBlockStream::readBytes(uint32_t Offset, uint32_t Size,
                                   ArrayRef<uint8_t> &Buffer) {
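  // Reads are serviced in three tiers: first try to hand back a reference
  // directly into the underlying MSF data, then look for a previously pooled
  // allocation that already covers the request, and only as a last resort
  // copy the blocks into a fresh pooled buffer.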
  // Make sure we aren't trying to read beyond the end of the stream.
  if (auto EC = checkOffset(Offset, Size))
    return EC;

  if (tryReadContiguously(Offset, Size, Buffer))
    return Error::success();

  auto CacheIter = CacheMap.find(Offset);
  if (CacheIter != CacheMap.end()) {
    // Try to find an alloc that was large enough for this request.
    for (auto &Entry : CacheIter->second) {
      if (Entry.size() >= Size) {
        Buffer = Entry.slice(0, Size);
        return Error::success();
      }
    }
  }

  // We couldn't find a buffer that started at the correct offset (the most
  // common scenario). Try to see if there is a buffer that starts at some
  // other offset but overlaps the desired range.
  for (auto &CacheItem : CacheMap) {
    Interval RequestExtent = std::make_pair(Offset, Offset + Size);

    // We already checked this one on the fast path above.
    if (CacheItem.first == Offset)
      continue;
    // If the initial extent of the cached item is beyond the ending extent
    // of the request, there is no overlap.
    if (CacheItem.first >= Offset + Size)
      continue;

    // We really only have to check the last item in the list, since we append
    // in order of increasing length.
    if (CacheItem.second.empty())
      continue;

    auto CachedAlloc = CacheItem.second.back();
    // If the initial extent of the request is beyond the ending extent of
    // the cached item, there is no overlap.
    Interval CachedExtent =
        std::make_pair(CacheItem.first, CacheItem.first + CachedAlloc.size());
    if (RequestExtent.first >= CachedExtent.first + CachedExtent.second)
      continue;

    Interval Intersection = intersect(CachedExtent, RequestExtent);
    // Only use this if the entire request extent is contained in the cached
    // extent.
    if (Intersection != RequestExtent)
      continue;

    uint32_t CacheRangeOffset =
        AbsoluteDifference(CachedExtent.first, Intersection.first);
    Buffer = CachedAlloc.slice(CacheRangeOffset, Size);
    return Error::success();
  }

  // Otherwise allocate a large enough buffer in the pool, memcpy the data
  // into it, and return an ArrayRef to that. Do not touch existing pool
  // allocations, as existing clients may be holding a pointer which must
  // not be invalidated.
  uint8_t *WriteBuffer = static_cast<uint8_t *>(Allocator.Allocate(Size, 8));
  if (auto EC = readBytes(Offset, MutableArrayRef<uint8_t>(WriteBuffer, Size)))
    return EC;

  if (CacheIter != CacheMap.end()) {
    CacheIter->second.emplace_back(WriteBuffer, Size);
  } else {
    std::vector<CacheEntry> List;
    List.emplace_back(WriteBuffer, Size);
    CacheMap.insert(std::make_pair(Offset, List));
  }
  Buffer = ArrayRef<uint8_t>(WriteBuffer, Size);
  return Error::success();
}

Error MappedBlockStream::readLongestContiguousChunk(uint32_t Offset,
                                                    ArrayRef<uint8_t> &Buffer) {
  // Make sure we aren't trying to read beyond the end of the stream.
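  // Only the starting offset is validated here; there is no caller-supplied
  // size, since the chunk simply extends as far as the underlying blocks
  // remain contiguous past Offset.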
  if (auto EC = checkOffset(Offset, 1))
    return EC;

  uint32_t First = Offset / BlockSize;
  uint32_t Last = First;

  while (Last < getNumBlocks() - 1) {
    if (StreamLayout.Blocks[Last] != StreamLayout.Blocks[Last + 1] - 1)
      break;
    ++Last;
  }

  uint32_t OffsetInFirstBlock = Offset % BlockSize;
  uint32_t BytesFromFirstBlock = BlockSize - OffsetInFirstBlock;
  uint32_t BlockSpan = Last - First + 1;
  uint32_t ByteSpan = BytesFromFirstBlock + (BlockSpan - 1) * BlockSize;

  ArrayRef<uint8_t> BlockData;
  uint32_t MsfOffset = blockToOffset(StreamLayout.Blocks[First], BlockSize);
  if (auto EC = MsfData.readBytes(MsfOffset, BlockSize, BlockData))
    return EC;

  BlockData = BlockData.drop_front(OffsetInFirstBlock);
  Buffer = ArrayRef<uint8_t>(BlockData.data(), ByteSpan);
  return Error::success();
}

uint32_t MappedBlockStream::getLength() { return StreamLayout.Length; }

bool MappedBlockStream::tryReadContiguously(uint32_t Offset, uint32_t Size,
                                            ArrayRef<uint8_t> &Buffer) {
  if (Size == 0) {
    Buffer = ArrayRef<uint8_t>();
    return true;
  }
  // Attempt to fulfill the request with a reference directly into the stream.
  // This can work even if the request crosses a block boundary, provided that
  // all subsequent blocks are contiguous. For example, a 10k read with a 4k
  // block size can be filled with a reference if, from the starting offset,
  // 3 blocks in a row are contiguous.
  uint32_t BlockNum = Offset / BlockSize;
  uint32_t OffsetInBlock = Offset % BlockSize;
  uint32_t BytesFromFirstBlock = std::min(Size, BlockSize - OffsetInBlock);
  uint32_t NumAdditionalBlocks =
      llvm::alignTo(Size - BytesFromFirstBlock, BlockSize) / BlockSize;

  uint32_t RequiredContiguousBlocks = NumAdditionalBlocks + 1;
  uint32_t E = StreamLayout.Blocks[BlockNum];
  for (uint32_t I = 0; I < RequiredContiguousBlocks; ++I, ++E) {
    if (StreamLayout.Blocks[I + BlockNum] != E)
      return false;
  }

  // Read out the entire block where the requested offset starts. Then drop
  // bytes from the beginning so that the actual starting byte lines up with
  // the requested starting byte. Then, since we know this is a contiguous
  // cross-block span, explicitly resize the ArrayRef to cover the entire
  // request length.
  ArrayRef<uint8_t> BlockData;
  uint32_t FirstBlockAddr = StreamLayout.Blocks[BlockNum];
  uint32_t MsfOffset = blockToOffset(FirstBlockAddr, BlockSize);
  if (auto EC = MsfData.readBytes(MsfOffset, BlockSize, BlockData)) {
    consumeError(std::move(EC));
    return false;
  }
  BlockData = BlockData.drop_front(OffsetInBlock);
  Buffer = ArrayRef<uint8_t>(BlockData.data(), Size);
  return true;
}

Error MappedBlockStream::readBytes(uint32_t Offset,
                                   MutableArrayRef<uint8_t> Buffer) {
  uint32_t BlockNum = Offset / BlockSize;
  uint32_t OffsetInBlock = Offset % BlockSize;
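  // BlockNum and OffsetInBlock locate the first stream block touched by this
  // read and the starting byte within it; the copy loop below walks forward
  // from there one block at a time.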
  // Make sure we aren't trying to read beyond the end of the stream.
  if (auto EC = checkOffset(Offset, Buffer.size()))
    return EC;

  uint32_t BytesLeft = Buffer.size();
  uint32_t BytesWritten = 0;
  uint8_t *WriteBuffer = Buffer.data();
  while (BytesLeft > 0) {
    uint32_t StreamBlockAddr = StreamLayout.Blocks[BlockNum];

    ArrayRef<uint8_t> BlockData;
    uint32_t MsfOffset = blockToOffset(StreamBlockAddr, BlockSize);
    if (auto EC = MsfData.readBytes(MsfOffset, BlockSize, BlockData))
      return EC;

    const uint8_t *ChunkStart = BlockData.data() + OffsetInBlock;
    uint32_t BytesInChunk = std::min(BytesLeft, BlockSize - OffsetInBlock);
    ::memcpy(WriteBuffer + BytesWritten, ChunkStart, BytesInChunk);

    BytesWritten += BytesInChunk;
    BytesLeft -= BytesInChunk;
    ++BlockNum;
    OffsetInBlock = 0;
  }

  return Error::success();
}

void MappedBlockStream::invalidateCache() { CacheMap.shrink_and_clear(); }

void MappedBlockStream::fixCacheAfterWrite(uint32_t Offset,
                                           ArrayRef<uint8_t> Data) const {
  // If this write overlapped a read which previously came from the pool,
  // someone may still be holding a pointer to that alloc which is now invalid.
  // Compute the overlapping range and update the cache entry, so any
  // outstanding buffers are automatically updated.
  for (const auto &MapEntry : CacheMap) {
    // If the end of the written extent precedes the beginning of the cached
    // extent, ignore this map entry.
    if (Offset + Data.size() < MapEntry.first)
      continue;
    for (const auto &Alloc : MapEntry.second) {
      // If the end of the cached extent precedes the beginning of the written
      // extent, ignore this alloc.
      if (MapEntry.first + Alloc.size() < Offset)
        continue;

      // If we get here, they are guaranteed to overlap.
      Interval WriteInterval = std::make_pair(Offset, Offset + Data.size());
      Interval CachedInterval =
          std::make_pair(MapEntry.first, MapEntry.first + Alloc.size());
      // If they overlap, we need to write the new data into the overlapping
      // range.
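      // For example (illustrative values only): a cached extent of [16, 48)
      // and an 8-byte write at offset 40 intersect over [40, 48), giving
      // Length = 8, SrcOffset = 0, and DestOffset = 24, so the last 8 bytes
      // of the cached allocation are refreshed from the written data.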
      auto Intersection = intersect(WriteInterval, CachedInterval);
      assert(Intersection.first <= Intersection.second);

      uint32_t Length = Intersection.second - Intersection.first;
      uint32_t SrcOffset =
          AbsoluteDifference(WriteInterval.first, Intersection.first);
      uint32_t DestOffset =
          AbsoluteDifference(CachedInterval.first, Intersection.first);
      ::memcpy(Alloc.data() + DestOffset, Data.data() + SrcOffset, Length);
    }
  }
}

WritableMappedBlockStream::WritableMappedBlockStream(
    uint32_t BlockSize, const MSFStreamLayout &Layout,
    WritableBinaryStreamRef MsfData, BumpPtrAllocator &Allocator)
    : ReadInterface(BlockSize, Layout, MsfData, Allocator),
      WriteInterface(MsfData) {}

std::unique_ptr<WritableMappedBlockStream>
WritableMappedBlockStream::createStream(uint32_t BlockSize,
                                        const MSFStreamLayout &Layout,
                                        WritableBinaryStreamRef MsfData,
                                        BumpPtrAllocator &Allocator) {
  return llvm::make_unique<MappedBlockStreamImpl<WritableMappedBlockStream>>(
      BlockSize, Layout, MsfData, Allocator);
}

std::unique_ptr<WritableMappedBlockStream>
WritableMappedBlockStream::createIndexedStream(const MSFLayout &Layout,
                                               WritableBinaryStreamRef MsfData,
                                               uint32_t StreamIndex,
                                               BumpPtrAllocator &Allocator) {
  assert(StreamIndex < Layout.StreamMap.size() && "Invalid stream index");
  MSFStreamLayout SL;
  SL.Blocks = Layout.StreamMap[StreamIndex];
  SL.Length = Layout.StreamSizes[StreamIndex];
  return createStream(Layout.SB->BlockSize, SL, MsfData, Allocator);
}

std::unique_ptr<WritableMappedBlockStream>
WritableMappedBlockStream::createDirectoryStream(
    const MSFLayout &Layout, WritableBinaryStreamRef MsfData,
    BumpPtrAllocator &Allocator) {
  MSFStreamLayout SL;
  SL.Blocks = Layout.DirectoryBlocks;
  SL.Length = Layout.SB->NumDirectoryBytes;
  return createStream(Layout.SB->BlockSize, SL, MsfData, Allocator);
}

std::unique_ptr<WritableMappedBlockStream>
WritableMappedBlockStream::createFpmStream(const MSFLayout &Layout,
                                           WritableBinaryStreamRef MsfData,
                                           BumpPtrAllocator &Allocator) {
  MSFStreamLayout SL;
  initializeFpmStreamLayout(Layout, SL);
  return createStream(Layout.SB->BlockSize, SL, MsfData, Allocator);
}

Error WritableMappedBlockStream::readBytes(uint32_t Offset, uint32_t Size,
                                           ArrayRef<uint8_t> &Buffer) {
  return ReadInterface.readBytes(Offset, Size, Buffer);
}

Error WritableMappedBlockStream::readLongestContiguousChunk(
    uint32_t Offset, ArrayRef<uint8_t> &Buffer) {
  return ReadInterface.readLongestContiguousChunk(Offset, Buffer);
}

uint32_t WritableMappedBlockStream::getLength() {
  return ReadInterface.getLength();
}

Error WritableMappedBlockStream::writeBytes(uint32_t Offset,
                                            ArrayRef<uint8_t> Buffer) {
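  // The write is split into block-sized chunks; each chunk is translated to
  // its absolute MSF offset via blockToOffset and forwarded to the write
  // interface, and the read cache is patched afterwards so that outstanding
  // cached buffers observe the new bytes.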
  // Make sure we aren't trying to write beyond the end of the stream.
  if (auto EC = checkOffset(Offset, Buffer.size()))
    return EC;

  uint32_t BlockNum = Offset / getBlockSize();
  uint32_t OffsetInBlock = Offset % getBlockSize();

  uint32_t BytesLeft = Buffer.size();
  uint32_t BytesWritten = 0;
  while (BytesLeft > 0) {
    uint32_t StreamBlockAddr = getStreamLayout().Blocks[BlockNum];
    uint32_t BytesToWriteInChunk =
        std::min(BytesLeft, getBlockSize() - OffsetInBlock);

    const uint8_t *Chunk = Buffer.data() + BytesWritten;
    ArrayRef<uint8_t> ChunkData(Chunk, BytesToWriteInChunk);
    uint32_t MsfOffset = blockToOffset(StreamBlockAddr, getBlockSize());
    MsfOffset += OffsetInBlock;
    if (auto EC = WriteInterface.writeBytes(MsfOffset, ChunkData))
      return EC;

    BytesLeft -= BytesToWriteInChunk;
    BytesWritten += BytesToWriteInChunk;
    ++BlockNum;
    OffsetInBlock = 0;
  }

  ReadInterface.fixCacheAfterWrite(Offset, Buffer);

  return Error::success();
}

Error WritableMappedBlockStream::commit() { return WriteInterface.commit(); }
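
// Example usage (a hedged sketch, not part of the implementation; the layout,
// MSF data, stream index, and payload below are hypothetical):
//
//   BumpPtrAllocator Allocator;
//   auto Stream = WritableMappedBlockStream::createIndexedStream(
//       Layout, MsfData, /*StreamIndex=*/1, Allocator);
//   if (auto EC = Stream->writeBytes(0, Payload))
//     return EC;
//   if (auto EC = Stream->commit())
//     return EC;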