//===--- JITLinkMemoryManager.cpp - JITLinkMemoryManager implementation ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Process.h"

#define DEBUG_TYPE "jitlink"

using namespace llvm;

namespace {

// FIXME: Remove this copy of CWrapperFunctionResult as soon as JITLink can
// depend on shared utils from Orc.

// Must be kept in-sync with compiler-rt/lib/orc/c-api.h.
union CWrapperFunctionResultDataUnion {
  char *ValuePtr;
  char Value[sizeof(ValuePtr)];
};

// Must be kept in-sync with compiler-rt/lib/orc/c-api.h.
typedef struct {
  CWrapperFunctionResultDataUnion Data;
  size_t Size;
} CWrapperFunctionResult;

Error toError(CWrapperFunctionResult R) {
  bool HasError = false;
  std::string ErrMsg;
  if (R.Size) {
    bool Large = R.Size > sizeof(CWrapperFunctionResultDataUnion);
    char *Content = Large ? R.Data.ValuePtr : R.Data.Value;
    if (Content[0]) {
      HasError = true;
      ErrMsg.resize(R.Size - 1);
      memcpy(&ErrMsg[0], Content + 1, R.Size - 1);
    }
    if (Large)
      free(R.Data.ValuePtr);
  } else if (R.Data.ValuePtr) {
    HasError = true;
    ErrMsg = R.Data.ValuePtr;
    free(R.Data.ValuePtr);
  }

  if (HasError)
    return make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode());
  return Error::success();
}
} // namespace

namespace llvm {
namespace jitlink {

JITLinkMemoryManager::~JITLinkMemoryManager() = default;
JITLinkMemoryManager::InFlightAlloc::~InFlightAlloc() = default;

static Error runAllocAction(JITLinkMemoryManager::AllocActionCall &C) {
  using WrapperFnTy = CWrapperFunctionResult (*)(const void *, size_t);
  auto *Fn = jitTargetAddressToPointer<WrapperFnTy>(C.FnAddr);

  return toError(Fn(jitTargetAddressToPointer<const void *>(C.CtxAddr),
                    static_cast<size_t>(C.CtxSize)));
}

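// A worked example of the computation in alignToBlock below: for a block with
// alignment 16 and alignment-offset 4, Addr = 0x1001 gives
// Delta = (4 - 0x1001) % 16 = 3 (wrapping unsigned arithmetic), so the
// returned address is 0x1004, which is congruent to 4 modulo 16 as required.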
// Align a JITTargetAddress to conform with block alignment requirements.
static JITTargetAddress alignToBlock(JITTargetAddress Addr, Block &B) {
  uint64_t Delta = (B.getAlignmentOffset() - Addr) % B.getAlignment();
  return Addr + Delta;
}

BasicLayout::BasicLayout(LinkGraph &G) : G(G) {

  for (auto &Sec : G.sections()) {
    // Skip empty sections.
    if (empty(Sec.blocks()))
      continue;

    auto &Seg = Segments[{Sec.getMemProt(), Sec.getMemDeallocPolicy()}];
    for (auto *B : Sec.blocks())
      if (LLVM_LIKELY(!B->isZeroFill()))
        Seg.ContentBlocks.push_back(B);
      else
        Seg.ZeroFillBlocks.push_back(B);
  }

  // Order the blocks in each segment, then compute the segments' sizes and
  // alignments.
  auto CompareBlocks = [](const Block *LHS, const Block *RHS) {
    // Sort by section, address and size.
    if (LHS->getSection().getOrdinal() != RHS->getSection().getOrdinal())
      return LHS->getSection().getOrdinal() < RHS->getSection().getOrdinal();
    if (LHS->getAddress() != RHS->getAddress())
      return LHS->getAddress() < RHS->getAddress();
    return LHS->getSize() < RHS->getSize();
  };

  LLVM_DEBUG(dbgs() << "Generated BasicLayout for " << G.getName() << ":\n");
  for (auto &KV : Segments) {
    auto &Seg = KV.second;

    llvm::sort(Seg.ContentBlocks, CompareBlocks);
    llvm::sort(Seg.ZeroFillBlocks, CompareBlocks);

    for (auto *B : Seg.ContentBlocks) {
      Seg.ContentSize = alignToBlock(Seg.ContentSize, *B);
      Seg.ContentSize += B->getSize();
      Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
    }

    uint64_t SegEndOffset = Seg.ContentSize;
    for (auto *B : Seg.ZeroFillBlocks) {
      SegEndOffset = alignToBlock(SegEndOffset, *B);
      SegEndOffset += B->getSize();
      Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
    }
    Seg.ZeroFillSize = SegEndOffset - Seg.ContentSize;

    LLVM_DEBUG({
      dbgs() << "  Seg " << KV.first
             << ": content-size=" << formatv("{0:x}", Seg.ContentSize)
             << ", zero-fill-size=" << formatv("{0:x}", Seg.ZeroFillSize)
             << ", align=" << formatv("{0:x}", Seg.Alignment.value()) << "\n";
    });
  }
}

Expected<BasicLayout::ContiguousPageBasedLayoutSizes>
BasicLayout::getContiguousPageBasedLayoutSizes(uint64_t PageSize) {
  ContiguousPageBasedLayoutSizes SegsSizes;

  for (auto &KV : segments()) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    if (Seg.Alignment > PageSize)
      return make_error<StringError>("Segment alignment greater than page size",
                                     inconvertibleErrorCode());

    uint64_t SegSize = alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
    if (AG.getMemDeallocPolicy() == MemDeallocPolicy::Standard)
      SegsSizes.StandardSegs += SegSize;
    else
      SegsSizes.FinalizeSegs += SegSize;
  }

  return SegsSizes;
}

Error BasicLayout::apply() {
  for (auto &KV : Segments) {
    auto &Seg = KV.second;

    assert(!(Seg.ContentBlocks.empty() && Seg.ZeroFillBlocks.empty()) &&
           "Empty section recorded?");

    for (auto *B : Seg.ContentBlocks) {
      // Align addr and working-mem-offset.
      Seg.Addr = alignToBlock(Seg.Addr, *B);
      Seg.NextWorkingMemOffset = alignToBlock(Seg.NextWorkingMemOffset, *B);

      // Update block addr.
      B->setAddress(Seg.Addr);
      Seg.Addr += B->getSize();

      // Copy content to working memory, then update the block to point at
      // working memory.
      memcpy(Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getContent().data(),
             B->getSize());
      B->setMutableContent(
          {Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getSize()});
      Seg.NextWorkingMemOffset += B->getSize();
    }

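    // Zero-fill blocks take up no working memory: they only need their
    // target addresses assigned.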
    for (auto *B : Seg.ZeroFillBlocks) {
      // Align addr.
      Seg.Addr = alignToBlock(Seg.Addr, *B);
      // Update block addr.
      B->setAddress(Seg.Addr);
      Seg.Addr += B->getSize();
    }

    Seg.ContentBlocks.clear();
    Seg.ZeroFillBlocks.clear();
  }

  return Error::success();
}

JITLinkMemoryManager::AllocActions &BasicLayout::graphAllocActions() {
  return G.allocActions();
}

void SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr,
                                const JITLinkDylib *JD, SegmentMap Segments,
                                OnCreatedFunction OnCreated) {

  static_assert(AllocGroup::NumGroups == 16,
                "AllocGroup has changed. Section names below must be updated");
  StringRef AGSectionNames[] = {
      "__---.standard", "__R--.standard", "__-W-.standard", "__RW-.standard",
      "__--X.standard", "__R-X.standard", "__-WX.standard", "__RWX.standard",
      "__---.finalize", "__R--.finalize", "__-W-.finalize", "__RW-.finalize",
      "__--X.finalize", "__R-X.finalize", "__-WX.finalize", "__RWX.finalize"};

  auto G =
      std::make_unique<LinkGraph>("", Triple(), 0, support::native, nullptr);
  AllocGroupSmallMap<Block *> ContentBlocks;

  JITTargetAddress NextAddr = 0x100000;
  for (auto &KV : Segments) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    auto AGSectionName =
        AGSectionNames[static_cast<unsigned>(AG.getMemProt()) |
                       (static_cast<bool>(AG.getMemDeallocPolicy()) << 3)];

    auto &Sec = G->createSection(AGSectionName, AG.getMemProt());
    Sec.setMemDeallocPolicy(AG.getMemDeallocPolicy());

    if (Seg.ContentSize != 0) {
      NextAddr = alignTo(NextAddr, Seg.ContentAlign);
      auto &B =
          G->createMutableContentBlock(Sec, G->allocateBuffer(Seg.ContentSize),
                                       NextAddr, Seg.ContentAlign.value(), 0);
      ContentBlocks[AG] = &B;
      NextAddr += Seg.ContentSize;
    }
  }

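  // The synthetic graph lets SimpleSegmentAlloc reuse the regular
  // LinkGraph-based allocation path: each requested segment becomes a section
  // of its own, with its content (if any) in a single mutable-content block.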
  // GRef declared separately since order-of-argument-eval isn't specified.
  auto &GRef = *G;
  MemMgr.allocate(JD, GRef,
                  [G = std::move(G), ContentBlocks = std::move(ContentBlocks),
                   OnCreated = std::move(OnCreated)](
                      JITLinkMemoryManager::AllocResult Alloc) mutable {
                    if (!Alloc)
                      OnCreated(Alloc.takeError());
                    else
                      OnCreated(SimpleSegmentAlloc(std::move(G),
                                                   std::move(ContentBlocks),
                                                   std::move(*Alloc)));
                  });
}

Expected<SimpleSegmentAlloc>
SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr, const JITLinkDylib *JD,
                           SegmentMap Segments) {
  std::promise<MSVCPExpected<SimpleSegmentAlloc>> AllocP;
  auto AllocF = AllocP.get_future();
  Create(MemMgr, JD, std::move(Segments),
         [&](Expected<SimpleSegmentAlloc> Result) {
           AllocP.set_value(std::move(Result));
         });
  return AllocF.get();
}

SimpleSegmentAlloc::SimpleSegmentAlloc(SimpleSegmentAlloc &&) = default;
SimpleSegmentAlloc &
SimpleSegmentAlloc::operator=(SimpleSegmentAlloc &&) = default;
SimpleSegmentAlloc::~SimpleSegmentAlloc() = default;

SimpleSegmentAlloc::SegmentInfo SimpleSegmentAlloc::getSegInfo(AllocGroup AG) {
  auto I = ContentBlocks.find(AG);
  if (I != ContentBlocks.end()) {
    auto &B = *I->second;
    return {B.getAddress(), B.getAlreadyMutableContent()};
  }
  return {};
}

SimpleSegmentAlloc::SimpleSegmentAlloc(
    std::unique_ptr<LinkGraph> G, AllocGroupSmallMap<Block *> ContentBlocks,
    std::unique_ptr<JITLinkMemoryManager::InFlightAlloc> Alloc)
    : G(std::move(G)), ContentBlocks(std::move(ContentBlocks)),
      Alloc(std::move(Alloc)) {}

class InProcessMemoryManager::IPInFlightAlloc
    : public JITLinkMemoryManager::InFlightAlloc {
public:
  IPInFlightAlloc(InProcessMemoryManager &MemMgr, LinkGraph &G, BasicLayout BL,
                  sys::MemoryBlock StandardSegments,
                  sys::MemoryBlock FinalizationSegments)
      : MemMgr(MemMgr), G(G), BL(std::move(BL)),
        StandardSegments(std::move(StandardSegments)),
        FinalizationSegments(std::move(FinalizationSegments)) {}

  void finalize(OnFinalizedFunction OnFinalized) override {

    // Apply memory protections to all segments.
    if (auto Err = applyProtections()) {
      OnFinalized(std::move(Err));
      return;
    }

    // Run finalization actions.
    // FIXME: Roll back previous successful actions on failure.
    std::vector<AllocActionCall> DeallocActions;
    DeallocActions.reserve(G.allocActions().size());
    for (auto &ActPair : G.allocActions()) {
      if (ActPair.Finalize.FnAddr)
        if (auto Err = runAllocAction(ActPair.Finalize)) {
          OnFinalized(std::move(Err));
          return;
        }
      if (ActPair.Dealloc.FnAddr)
        DeallocActions.push_back(ActPair.Dealloc);
    }
    G.allocActions().clear();

    // Release the finalize segments slab.
    if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments)) {
      OnFinalized(errorCodeToError(EC));
      return;
    }

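    // All finalize-lifetime memory has now been returned to the OS; only the
    // standard segments (and their dealloc actions) live on in the
    // FinalizedAlloc handed to the caller.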
    // Continue with finalized allocation.
    OnFinalized(MemMgr.createFinalizedAlloc(std::move(StandardSegments),
                                            std::move(DeallocActions)));
  }

  void abandon(OnAbandonedFunction OnAbandoned) override {
    Error Err = Error::success();
    if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments))
      Err = joinErrors(std::move(Err), errorCodeToError(EC));
    if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
      Err = joinErrors(std::move(Err), errorCodeToError(EC));
    OnAbandoned(std::move(Err));
  }

private:
  Error applyProtections() {
    for (auto &KV : BL.segments()) {
      const auto &AG = KV.first;
      auto &Seg = KV.second;

      auto Prot = toSysMemoryProtectionFlags(AG.getMemProt());

      uint64_t SegSize =
          alignTo(Seg.ContentSize + Seg.ZeroFillSize, MemMgr.PageSize);
      sys::MemoryBlock MB(Seg.WorkingMem, SegSize);
      if (auto EC = sys::Memory::protectMappedMemory(MB, Prot))
        return errorCodeToError(EC);
      if (Prot & sys::Memory::MF_EXEC)
        sys::Memory::InvalidateInstructionCache(MB.base(), MB.allocatedSize());
    }
    return Error::success();
  }

  InProcessMemoryManager &MemMgr;
  LinkGraph &G;
  BasicLayout BL;
  sys::MemoryBlock StandardSegments;
  sys::MemoryBlock FinalizationSegments;
};

Expected<std::unique_ptr<InProcessMemoryManager>>
InProcessMemoryManager::Create() {
  if (auto PageSize = sys::Process::getPageSize())
    return std::make_unique<InProcessMemoryManager>(*PageSize);
  else
    return PageSize.takeError();
}

void InProcessMemoryManager::allocate(const JITLinkDylib *JD, LinkGraph &G,
                                      OnAllocatedFunction OnAllocated) {

  // FIXME: Just check this once on startup.
  if (!isPowerOf2_64((uint64_t)PageSize)) {
    OnAllocated(make_error<StringError>("Page size is not a power of 2",
                                        inconvertibleErrorCode()));
    return;
  }

  BasicLayout BL(G);

  // Scan the request and calculate the group and total sizes.
  // Check that no segment's alignment exceeds the page size.
  auto SegsSizes = BL.getContiguousPageBasedLayoutSizes(PageSize);
  if (!SegsSizes) {
    OnAllocated(SegsSizes.takeError());
    return;
  }

  // Check that the total size requested (including zero fill) fits in a
  // size_t.
  if (SegsSizes->total() > std::numeric_limits<size_t>::max()) {
    OnAllocated(make_error<JITLinkError>(
        "Total requested size " + formatv("{0:x}", SegsSizes->total()) +
        " for graph " + G.getName() + " exceeds address space"));
    return;
  }

  // Allocate one slab for the whole thing (to make sure everything is
  // in-range), then partition into standard and finalization blocks.
  //
  // FIXME: Make two separate allocations in the future to reduce
  // fragmentation: finalization segments will usually be a single page, and
  // standard segments are likely to be more than one page. Where multiple
  // allocations are in-flight at once (likely) the current approach will leave
  // a lot of single-page holes.
  sys::MemoryBlock Slab;
  sys::MemoryBlock StandardSegsMem;
  sys::MemoryBlock FinalizeSegsMem;
  {
    const sys::Memory::ProtectionFlags ReadWrite =
        static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
                                                  sys::Memory::MF_WRITE);

    std::error_code EC;
    Slab = sys::Memory::allocateMappedMemory(SegsSizes->total(), nullptr,
                                             ReadWrite, EC);

    if (EC) {
      OnAllocated(errorCodeToError(EC));
      return;
    }

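    // Note: the slab is mapped read-write for now; per-segment protections
    // are applied by IPInFlightAlloc::applyProtections during finalization,
    // after all content has been copied into place.
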
    // Zero-fill the whole slab up-front.
    memset(Slab.base(), 0, Slab.allocatedSize());

    StandardSegsMem = {Slab.base(),
                       static_cast<size_t>(SegsSizes->StandardSegs)};
    FinalizeSegsMem = {(void *)((char *)Slab.base() + SegsSizes->StandardSegs),
                       static_cast<size_t>(SegsSizes->FinalizeSegs)};
  }

  auto NextStandardSegAddr = pointerToJITTargetAddress(StandardSegsMem.base());
  auto NextFinalizeSegAddr = pointerToJITTargetAddress(FinalizeSegsMem.base());

  LLVM_DEBUG({
    dbgs() << "InProcessMemoryManager allocated:\n";
    if (SegsSizes->StandardSegs)
      dbgs() << formatv("  [ {0:x16} -- {1:x16} ]", NextStandardSegAddr,
                        NextStandardSegAddr + StandardSegsMem.allocatedSize())
             << " to standard segs\n";
    else
      dbgs() << "  no standard segs\n";
    if (SegsSizes->FinalizeSegs)
      dbgs() << formatv("  [ {0:x16} -- {1:x16} ]", NextFinalizeSegAddr,
                        NextFinalizeSegAddr + FinalizeSegsMem.allocatedSize())
             << " to finalize segs\n";
    else
      dbgs() << "  no finalize segs\n";
  });

  // Assign addresses (and working memory) to the layout's segments.
  for (auto &KV : BL.segments()) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    auto &SegAddr = (AG.getMemDeallocPolicy() == MemDeallocPolicy::Standard)
                        ? NextStandardSegAddr
                        : NextFinalizeSegAddr;

    Seg.WorkingMem = jitTargetAddressToPointer<char *>(SegAddr);
    Seg.Addr = SegAddr;

    SegAddr += alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
  }

  if (auto Err = BL.apply()) {
    OnAllocated(std::move(Err));
    return;
  }

  OnAllocated(std::make_unique<IPInFlightAlloc>(*this, G, std::move(BL),
                                                std::move(StandardSegsMem),
                                                std::move(FinalizeSegsMem)));
}

void InProcessMemoryManager::deallocate(std::vector<FinalizedAlloc> Allocs,
                                        OnDeallocatedFunction OnDeallocated) {
  std::vector<sys::MemoryBlock> StandardSegmentsList;
  std::vector<std::vector<AllocActionCall>> DeallocActionsList;

  {
    std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
    for (auto &Alloc : Allocs) {
      auto *FA =
          jitTargetAddressToPointer<FinalizedAllocInfo *>(Alloc.release());
      StandardSegmentsList.push_back(std::move(FA->StandardSegments));
      if (!FA->DeallocActions.empty())
        DeallocActionsList.push_back(std::move(FA->DeallocActions));
      FA->~FinalizedAllocInfo();
      FinalizedAllocInfos.Deallocate(FA);
    }
  }

  Error DeallocErr = Error::success();

  while (!DeallocActionsList.empty()) {
    auto &DeallocActions = DeallocActionsList.back();
    auto &StandardSegments = StandardSegmentsList.back();

    // Run any dealloc actions.
    while (!DeallocActions.empty()) {
      if (auto Err = runAllocAction(DeallocActions.back()))
        DeallocErr = joinErrors(std::move(DeallocErr), std::move(Err));
      DeallocActions.pop_back();
    }
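
    // Note: dealloc actions may reference memory in the standard segments,
    // so they are run before that memory is released.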
    // Release the standard segments slab.
    if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
      DeallocErr = joinErrors(std::move(DeallocErr), errorCodeToError(EC));

    DeallocActionsList.pop_back();
    StandardSegmentsList.pop_back();
  }

  OnDeallocated(std::move(DeallocErr));
}

JITLinkMemoryManager::FinalizedAlloc
InProcessMemoryManager::createFinalizedAlloc(
    sys::MemoryBlock StandardSegments,
    std::vector<AllocActionCall> DeallocActions) {
  std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
  auto *FA = FinalizedAllocInfos.Allocate<FinalizedAllocInfo>();
  new (FA) FinalizedAllocInfo(
      {std::move(StandardSegments), std::move(DeallocActions)});
  return FinalizedAlloc(pointerToJITTargetAddress(FA));
}

} // end namespace jitlink
} // end namespace llvm