//===------ BPFAbstractMemberAccess.cpp - Abstracting Member Accesses -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass abstracts struct/union member accesses in order to support
// compile-once run-everywhere (CO-RE). CO-RE compiles a program once so that
// it can run on different kernels. In particular, if a bpf program tries to
// access a particular kernel data structure member, the details of the
// intermediate member accesses are remembered so that the bpf loader can make
// the necessary adjustments right before program loading.
//
// For example,
//
//   struct s {
//     int a;
//     int b;
//   };
//   struct t {
//     struct s c;
//     int d;
//   };
//   struct t e;
//
// For the member access e.c.b, the compiler will generate the code
//   &e + 4
//
// Compile-once run-everywhere instead generates the following code:
//   r = 4
//   &e + r
// The "4" in "r = 4" can be changed based on a particular kernel version.
// For example, if on a particular kernel version struct s is changed to
//
//   struct s {
//     int new_field;
//     int a;
//     int b;
//   }
//
// then, by repeating the member access on the host, the bpf loader can
// adjust "r = 4" to "r = 8".
//
// This feature relies on the following three intrinsic calls:
//   addr = preserve_array_access_index(base, dimension, index)
//   addr = preserve_union_access_index(base, di_index)
//          !llvm.preserve.access.index <union_ditype>
//   addr = preserve_struct_access_index(base, gep_index, di_index)
//          !llvm.preserve.access.index <struct_ditype>
//
// Bitfield member accesses need special attention. A user cannot take the
// address of a bitfield access. To help the kernel verifier optimize
// bitfield code, a new clang intrinsic is introduced:
//   uint32_t __builtin_preserve_field_info(member_access, info_kind)
// In IR, a chain with two (or more) intrinsic calls will be generated:
//   ...
//   addr = preserve_struct_access_index(base, 1, 1) !struct s
//   uint32_t result = bpf_preserve_field_info(addr, info_kind)
//
// Suppose the info_kind is FIELD_SIGNEDNESS. The above two IR intrinsics
// will then be replaced with a relocatable insn:
//   signedness = /* signedness of member_access */
// and the signedness can be changed by the bpf loader based on the
// types on the host.
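//
// For illustration, a hypothetical source-level use of the field-info
// relocations (the info_kind names are the symbolic kinds used in this file;
// real programs usually go through libbpf's BPF_CORE_READ_BITFIELD macro,
// which differs in detail):
//
//   struct pkt { unsigned int opt:3; };   /* kernel layout may change */
//   struct pkt *p = ...;
//   unsigned long long v = ...;           /* bytes loaded around p->opt */
//   v <<= __builtin_preserve_field_info(p->opt, FIELD_LSHIFT_U64);
//   if (__builtin_preserve_field_info(p->opt, FIELD_SIGNEDNESS))
//     v = (long long)v >> __builtin_preserve_field_info(p->opt, FIELD_RSHIFT_U64);
//   else
//     v >>= __builtin_preserve_field_info(p->opt, FIELD_RSHIFT_U64);
//
// Each __builtin_preserve_field_info() above becomes a relocatable
// immediate that the bpf loader patches for the target kernel.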
//
// A user can also test whether a field exists or not with
//   uint32_t result = bpf_preserve_field_info(member_access, FIELD_EXISTENCE)
// The field will always be available (result = 1) during the initial
// compilation, but the bpf loader can patch it with the correct value
// on the target host, where the member_access may or may not be available.
//
//===----------------------------------------------------------------------===//

#include "BPF.h"
#include "BPFCORE.h"
#include "BPFTargetMachine.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsBPF.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <stack>

#define DEBUG_TYPE "bpf-abstract-member-access"

namespace llvm {
constexpr StringRef BPFCoreSharedInfo::AmaAttr;
uint32_t BPFCoreSharedInfo::SeqNum;

Instruction *BPFCoreSharedInfo::insertPassThrough(Module *M, BasicBlock *BB,
                                                  Instruction *Input,
                                                  Instruction *Before) {
  Function *Fn = Intrinsic::getDeclaration(
      M, Intrinsic::bpf_passthrough, {Input->getType(), Input->getType()});
  Constant *SeqNumVal = ConstantInt::get(Type::getInt32Ty(BB->getContext()),
                                         BPFCoreSharedInfo::SeqNum++);

  auto *NewInst = CallInst::Create(Fn, {SeqNumVal, Input});
  BB->getInstList().insert(Before->getIterator(), NewInst);
  return NewInst;
}
} // namespace llvm

using namespace llvm;

namespace {
class BPFAbstractMemberAccess final {
public:
  BPFAbstractMemberAccess(BPFTargetMachine *TM) : TM(TM) {}

  bool run(Function &F);

  struct CallInfo {
    uint32_t Kind;
    uint32_t AccessIndex;
    Align RecordAlignment;
    MDNode *Metadata;
    Value *Base;
  };
  typedef std::stack<std::pair<CallInst *, CallInfo>> CallInfoStack;

private:
  enum : uint32_t {
    BPFPreserveArrayAI = 1,
    BPFPreserveUnionAI = 2,
    BPFPreserveStructAI = 3,
    BPFPreserveFieldInfoAI = 4,
  };

  TargetMachine *TM;
  const DataLayout *DL = nullptr;
  Module *M = nullptr;

  static std::map<std::string, GlobalVariable *> GEPGlobals;
  // A map to link preserve_*_access_index intrinsic calls.
  std::map<CallInst *, std::pair<CallInst *, CallInfo>> AIChain;
  // A map to hold all the base preserve_*_access_index intrinsic calls.
  // A base call is not an input of any other preserve_*
  // intrinsics.
  std::map<CallInst *, CallInfo> BaseAICalls;

  bool doTransformation(Function &F);

  void traceAICall(CallInst *Call, CallInfo &ParentInfo);
  void traceBitCast(BitCastInst *BitCast, CallInst *Parent,
                    CallInfo &ParentInfo);
  void traceGEP(GetElementPtrInst *GEP, CallInst *Parent,
                CallInfo &ParentInfo);
  void collectAICallChains(Function &F);

  bool IsPreserveDIAccessIndexCall(const CallInst *Call, CallInfo &CInfo);
  bool IsValidAIChain(const MDNode *ParentMeta, uint32_t ParentAI,
                      const MDNode *ChildMeta);
  bool removePreserveAccessIndexIntrinsic(Function &F);
  void replaceWithGEP(std::vector<CallInst *> &CallList,
                      uint32_t NumOfZerosIndex, uint32_t DIIndex);
  bool HasPreserveFieldInfoCall(CallInfoStack &CallStack);
  void GetStorageBitRange(DIDerivedType *MemberTy, Align RecordAlignment,
                          uint32_t &StartBitOffset, uint32_t &EndBitOffset);
  uint32_t GetFieldInfo(uint32_t InfoKind, DICompositeType *CTy,
                        uint32_t AccessIndex, uint32_t PatchImm,
                        Align RecordAlignment);

  Value *computeBaseAndAccessKey(CallInst *Call, CallInfo &CInfo,
                                 std::string &AccessKey, MDNode *&BaseMeta);
  MDNode *computeAccessKey(CallInst *Call, CallInfo &CInfo,
                           std::string &AccessKey, bool &IsInt32Ret);
  uint64_t getConstant(const Value *IndexValue);
  bool transformGEPChain(CallInst *Call, CallInfo &CInfo);
};

std::map<std::string, GlobalVariable *> BPFAbstractMemberAccess::GEPGlobals;

class BPFAbstractMemberAccessLegacyPass final : public FunctionPass {
  BPFTargetMachine *TM;

  bool runOnFunction(Function &F) override {
    return BPFAbstractMemberAccess(TM).run(F);
  }

public:
  static char ID;

  // Add an optional BPFTargetMachine parameter so that the BPF backend can add
  // the phase with the target machine to find out the endianness. The default
  // constructor (without parameters) is used by the pass manager for pass
  // management purposes.
  BPFAbstractMemberAccessLegacyPass(BPFTargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM) {}
};

} // End anonymous namespace

char BPFAbstractMemberAccessLegacyPass::ID = 0;
INITIALIZE_PASS(BPFAbstractMemberAccessLegacyPass, DEBUG_TYPE,
                "BPF Abstract Member Access", false, false)

FunctionPass *llvm::createBPFAbstractMemberAccess(BPFTargetMachine *TM) {
  return new BPFAbstractMemberAccessLegacyPass(TM);
}

bool BPFAbstractMemberAccess::run(Function &F) {
  LLVM_DEBUG(dbgs() << "********** Abstract Member Accesses **********\n");

  M = F.getParent();
  if (!M)
    return false;

  // Bail out if there is no debug info.
  if (M->debug_compile_units().empty())
    return false;

  DL = &M->getDataLayout();
  return doTransformation(F);
}

static bool SkipDIDerivedTag(unsigned Tag, bool skipTypedef) {
  if (Tag != dwarf::DW_TAG_typedef && Tag != dwarf::DW_TAG_const_type &&
      Tag != dwarf::DW_TAG_volatile_type &&
      Tag != dwarf::DW_TAG_restrict_type &&
      Tag != dwarf::DW_TAG_member)
    return false;
  if (Tag == dwarf::DW_TAG_typedef && !skipTypedef)
    return false;
  return true;
}

static DIType *stripQualifiers(DIType *Ty, bool skipTypedef = true) {
  while (auto *DTy = dyn_cast<DIDerivedType>(Ty)) {
    if (!SkipDIDerivedTag(DTy->getTag(), skipTypedef))
      break;
    Ty = DTy->getBaseType();
  }
  return Ty;
}

static const DIType *stripQualifiers(const DIType *Ty) {
  while (auto *DTy = dyn_cast<DIDerivedType>(Ty)) {
    if (!SkipDIDerivedTag(DTy->getTag(), true))
      break;
    Ty = DTy->getBaseType();
  }
  return Ty;
}

static uint32_t calcArraySize(const DICompositeType *CTy, uint32_t StartDim) {
  DINodeArray Elements = CTy->getElements();
  uint32_t DimSize = 1;
  for (uint32_t I = StartDim; I < Elements.size(); ++I) {
    if (auto *Element = dyn_cast_or_null<DINode>(Elements[I]))
      if (Element->getTag() == dwarf::DW_TAG_subrange_type) {
        const DISubrange *SR = cast<DISubrange>(Element);
        auto *CI = SR->getCount().dyn_cast<ConstantInt *>();
        DimSize *= CI->getSExtValue();
      }
  }

  return DimSize;
}

/// Check whether a call is a preserve_*_access_index intrinsic call or not.
bool BPFAbstractMemberAccess::IsPreserveDIAccessIndexCall(const CallInst *Call,
                                                          CallInfo &CInfo) {
  if (!Call)
    return false;

  const auto *GV = dyn_cast<GlobalValue>(Call->getCalledOperand());
  if (!GV)
    return false;
  if (GV->getName().startswith("llvm.preserve.array.access.index")) {
    CInfo.Kind = BPFPreserveArrayAI;
    CInfo.Metadata = Call->getMetadata(LLVMContext::MD_preserve_access_index);
    if (!CInfo.Metadata)
      report_fatal_error("Missing metadata for llvm.preserve.array.access.index intrinsic");
    CInfo.AccessIndex = getConstant(Call->getArgOperand(2));
    CInfo.Base = Call->getArgOperand(0);
    CInfo.RecordAlignment =
        DL->getABITypeAlign(CInfo.Base->getType()->getPointerElementType());
    return true;
  }
  if (GV->getName().startswith("llvm.preserve.union.access.index")) {
    CInfo.Kind = BPFPreserveUnionAI;
    CInfo.Metadata = Call->getMetadata(LLVMContext::MD_preserve_access_index);
    if (!CInfo.Metadata)
      report_fatal_error("Missing metadata for llvm.preserve.union.access.index intrinsic");
    CInfo.AccessIndex = getConstant(Call->getArgOperand(1));
    CInfo.Base = Call->getArgOperand(0);
    CInfo.RecordAlignment =
        DL->getABITypeAlign(CInfo.Base->getType()->getPointerElementType());
    return true;
  }
  if (GV->getName().startswith("llvm.preserve.struct.access.index")) {
    CInfo.Kind = BPFPreserveStructAI;
    CInfo.Metadata = Call->getMetadata(LLVMContext::MD_preserve_access_index);
    if (!CInfo.Metadata)
      report_fatal_error("Missing metadata for llvm.preserve.struct.access.index intrinsic");
    CInfo.AccessIndex = getConstant(Call->getArgOperand(2));
    CInfo.Base = Call->getArgOperand(0);
    CInfo.RecordAlignment =
        DL->getABITypeAlign(CInfo.Base->getType()->getPointerElementType());
    return true;
  }
  if (GV->getName().startswith("llvm.bpf.preserve.field.info")) {
    CInfo.Kind = BPFPreserveFieldInfoAI;
    CInfo.Metadata = nullptr;
    // Check the validity of info_kind, as clang did not check this.
    uint64_t InfoKind = getConstant(Call->getArgOperand(1));
    if (InfoKind >= BPFCoreSharedInfo::MAX_FIELD_RELOC_KIND)
      report_fatal_error("Incorrect info_kind for llvm.bpf.preserve.field.info intrinsic");
    CInfo.AccessIndex = InfoKind;
    return true;
  }
  if (GV->getName().startswith("llvm.bpf.preserve.type.info")) {
    CInfo.Kind = BPFPreserveFieldInfoAI;
    CInfo.Metadata = Call->getMetadata(LLVMContext::MD_preserve_access_index);
    if (!CInfo.Metadata)
      report_fatal_error("Missing metadata for llvm.preserve.type.info intrinsic");
    uint64_t Flag = getConstant(Call->getArgOperand(1));
    if (Flag >= BPFCoreSharedInfo::MAX_PRESERVE_TYPE_INFO_FLAG)
      report_fatal_error("Incorrect flag for llvm.bpf.preserve.type.info intrinsic");
    if (Flag == BPFCoreSharedInfo::PRESERVE_TYPE_INFO_EXISTENCE)
      CInfo.AccessIndex = BPFCoreSharedInfo::TYPE_EXISTENCE;
    else
      CInfo.AccessIndex = BPFCoreSharedInfo::TYPE_SIZE;
    return true;
  }
  if (GV->getName().startswith("llvm.bpf.preserve.enum.value")) {
    CInfo.Kind = BPFPreserveFieldInfoAI;
    CInfo.Metadata = Call->getMetadata(LLVMContext::MD_preserve_access_index);
    if (!CInfo.Metadata)
      report_fatal_error("Missing metadata for llvm.preserve.enum.value intrinsic");
    uint64_t Flag = getConstant(Call->getArgOperand(2));
    if (Flag >= BPFCoreSharedInfo::MAX_PRESERVE_ENUM_VALUE_FLAG)
      report_fatal_error("Incorrect flag for llvm.bpf.preserve.enum.value intrinsic");
    if (Flag == BPFCoreSharedInfo::PRESERVE_ENUM_VALUE_EXISTENCE)
      CInfo.AccessIndex = BPFCoreSharedInfo::ENUM_VALUE_EXISTENCE;
    else
      CInfo.AccessIndex = BPFCoreSharedInfo::ENUM_VALUE;
    return true;
  }

  return false;
}

void BPFAbstractMemberAccess::replaceWithGEP(std::vector<CallInst *> &CallList,
                                             uint32_t DimensionIndex,
                                             uint32_t GEPIndex) {
  for (auto Call : CallList) {
    uint32_t Dimension = 1;
    if (DimensionIndex > 0)
      Dimension = getConstant(Call->getArgOperand(DimensionIndex));

    Constant *Zero =
        ConstantInt::get(Type::getInt32Ty(Call->getParent()->getContext()), 0);
    SmallVector<Value *, 4> IdxList;
    for (unsigned I = 0; I < Dimension; ++I)
      IdxList.push_back(Zero);
    IdxList.push_back(Call->getArgOperand(GEPIndex));

    auto *GEP = GetElementPtrInst::CreateInBounds(
        Call->getArgOperand(0)->getType()->getPointerElementType(),
        Call->getArgOperand(0), IdxList, "", Call);
    Call->replaceAllUsesWith(GEP);
    Call->eraseFromParent();
  }
}

bool BPFAbstractMemberAccess::removePreserveAccessIndexIntrinsic(Function &F) {
  std::vector<CallInst *> PreserveArrayIndexCalls;
  std::vector<CallInst *> PreserveUnionIndexCalls;
  std::vector<CallInst *> PreserveStructIndexCalls;
  bool Found = false;

  for (auto &BB : F)
    for (auto &I : BB) {
      auto *Call = dyn_cast<CallInst>(&I);
      CallInfo CInfo;
      if (!IsPreserveDIAccessIndexCall(Call, CInfo))
        continue;

      Found = true;
      if (CInfo.Kind == BPFPreserveArrayAI)
        PreserveArrayIndexCalls.push_back(Call);
      else if (CInfo.Kind == BPFPreserveUnionAI)
        PreserveUnionIndexCalls.push_back(Call);
      else
        PreserveStructIndexCalls.push_back(Call);
    }

  // Do the following transformation:
  // . addr = preserve_array_access_index(base, dimension, index)
  //   is transformed to
  //     addr = GEP(base, <dimension> zeros, index)
  // . addr = preserve_union_access_index(base, di_index)
  //   is transformed to
  //     addr = base, i.e., all usages of "addr" are replaced by "base".
  // . addr = preserve_struct_access_index(base, gep_index, di_index)
  //   is transformed to
  //     addr = GEP(base, 0, gep_index)
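  //
  // For example (illustrative IR only; the mangled intrinsic suffix and the
  // value names are made up), a struct access such as
  //   %a = call i32* @llvm.preserve.struct.access.index.p0i32.p0s(
  //            %struct.s* %base, i32 1, i32 1)
  // simply becomes
  //   %a = getelementptr inbounds %struct.s, %struct.s* %base, i32 0, i32 1
  // when the intrinsics are removed without relocation.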
  replaceWithGEP(PreserveArrayIndexCalls, 1, 2);
  replaceWithGEP(PreserveStructIndexCalls, 0, 1);
  for (auto Call : PreserveUnionIndexCalls) {
    Call->replaceAllUsesWith(Call->getArgOperand(0));
    Call->eraseFromParent();
  }

  return Found;
}

/// Check whether the access index chain is valid. We check it
/// here because there may be type casts between two
/// access indexes. We want to ensure the memory access is still valid.
bool BPFAbstractMemberAccess::IsValidAIChain(const MDNode *ParentType,
                                             uint32_t ParentAI,
                                             const MDNode *ChildType) {
  if (!ChildType)
    return true; // preserve_field_info, no type comparison needed.

  const DIType *PType = stripQualifiers(cast<DIType>(ParentType));
  const DIType *CType = stripQualifiers(cast<DIType>(ChildType));

  // Child is a derived/pointer type, which is due to type casting.
  // A pointer type cannot be in the middle of a chain.
  if (isa<DIDerivedType>(CType))
    return false;

  // Parent is a pointer type.
  if (const auto *PtrTy = dyn_cast<DIDerivedType>(PType)) {
    if (PtrTy->getTag() != dwarf::DW_TAG_pointer_type)
      return false;
    return stripQualifiers(PtrTy->getBaseType()) == CType;
  }

  // Otherwise, struct/union/array types.
  const auto *PTy = dyn_cast<DICompositeType>(PType);
  const auto *CTy = dyn_cast<DICompositeType>(CType);
  assert(PTy && CTy && "ParentType or ChildType is null or not composite");

  uint32_t PTyTag = PTy->getTag();
  assert(PTyTag == dwarf::DW_TAG_array_type ||
         PTyTag == dwarf::DW_TAG_structure_type ||
         PTyTag == dwarf::DW_TAG_union_type);

  uint32_t CTyTag = CTy->getTag();
  assert(CTyTag == dwarf::DW_TAG_array_type ||
         CTyTag == dwarf::DW_TAG_structure_type ||
         CTyTag == dwarf::DW_TAG_union_type);

  // For multi-dimensional arrays, the base element types should be the same.
  if (PTyTag == dwarf::DW_TAG_array_type && PTyTag == CTyTag)
    return PTy->getBaseType() == CTy->getBaseType();

  DIType *Ty;
  if (PTyTag == dwarf::DW_TAG_array_type)
    Ty = PTy->getBaseType();
  else
    Ty = dyn_cast<DIType>(PTy->getElements()[ParentAI]);

  return dyn_cast<DICompositeType>(stripQualifiers(Ty)) == CTy;
}

void BPFAbstractMemberAccess::traceAICall(CallInst *Call,
                                          CallInfo &ParentInfo) {
  for (User *U : Call->users()) {
    Instruction *Inst = dyn_cast<Instruction>(U);
    if (!Inst)
      continue;

    if (auto *BI = dyn_cast<BitCastInst>(Inst)) {
      traceBitCast(BI, Call, ParentInfo);
    } else if (auto *CI = dyn_cast<CallInst>(Inst)) {
      CallInfo ChildInfo;

      if (IsPreserveDIAccessIndexCall(CI, ChildInfo) &&
          IsValidAIChain(ParentInfo.Metadata, ParentInfo.AccessIndex,
                         ChildInfo.Metadata)) {
        AIChain[CI] = std::make_pair(Call, ParentInfo);
        traceAICall(CI, ChildInfo);
      } else {
        BaseAICalls[Call] = ParentInfo;
      }
    } else if (auto *GI = dyn_cast<GetElementPtrInst>(Inst)) {
      if (GI->hasAllZeroIndices())
        traceGEP(GI, Call, ParentInfo);
      else
        BaseAICalls[Call] = ParentInfo;
    } else {
      BaseAICalls[Call] = ParentInfo;
    }
  }
}

void BPFAbstractMemberAccess::traceBitCast(BitCastInst *BitCast,
                                           CallInst *Parent,
                                           CallInfo &ParentInfo) {
  for (User *U : BitCast->users()) {
    Instruction *Inst = dyn_cast<Instruction>(U);
    if (!Inst)
      continue;

    if (auto *BI = dyn_cast<BitCastInst>(Inst)) {
      traceBitCast(BI, Parent, ParentInfo);
    } else if (auto *CI = dyn_cast<CallInst>(Inst)) {
      CallInfo ChildInfo;
      if (IsPreserveDIAccessIndexCall(CI, ChildInfo) &&
          IsValidAIChain(ParentInfo.Metadata, ParentInfo.AccessIndex,
                         ChildInfo.Metadata)) {
        AIChain[CI] = std::make_pair(Parent, ParentInfo);
        traceAICall(CI, ChildInfo);
      } else {
        BaseAICalls[Parent] = ParentInfo;
      }
    } else if (auto *GI = dyn_cast<GetElementPtrInst>(Inst)) {
      if (GI->hasAllZeroIndices())
        traceGEP(GI, Parent, ParentInfo);
      else
        BaseAICalls[Parent] = ParentInfo;
    } else {
      BaseAICalls[Parent] = ParentInfo;
    }
  }
}

void BPFAbstractMemberAccess::traceGEP(GetElementPtrInst *GEP, CallInst *Parent,
                                       CallInfo &ParentInfo) {
  for (User *U : GEP->users()) {
    Instruction *Inst = dyn_cast<Instruction>(U);
    if (!Inst)
      continue;

    if (auto *BI = dyn_cast<BitCastInst>(Inst)) {
      traceBitCast(BI, Parent, ParentInfo);
    } else if (auto *CI = dyn_cast<CallInst>(Inst)) {
      CallInfo ChildInfo;
      if (IsPreserveDIAccessIndexCall(CI, ChildInfo) &&
          IsValidAIChain(ParentInfo.Metadata, ParentInfo.AccessIndex,
                         ChildInfo.Metadata)) {
        AIChain[CI] = std::make_pair(Parent, ParentInfo);
        traceAICall(CI, ChildInfo);
      } else {
        BaseAICalls[Parent] = ParentInfo;
      }
    } else if (auto *GI = dyn_cast<GetElementPtrInst>(Inst)) {
      if (GI->hasAllZeroIndices())
        traceGEP(GI, Parent, ParentInfo);
      else
        BaseAICalls[Parent] = ParentInfo;
    } else {
      BaseAICalls[Parent] = ParentInfo;
    }
  }
}

void BPFAbstractMemberAccess::collectAICallChains(Function &F) {
  AIChain.clear();
  BaseAICalls.clear();

  for (auto &BB : F)
    for (auto &I : BB) {
      CallInfo CInfo;
      auto *Call = dyn_cast<CallInst>(&I);
      if (!IsPreserveDIAccessIndexCall(Call, CInfo) ||
          AIChain.find(Call) != AIChain.end())
        continue;

      traceAICall(Call, CInfo);
    }
}

uint64_t BPFAbstractMemberAccess::getConstant(const Value *IndexValue) {
  const ConstantInt *CV = dyn_cast<ConstantInt>(IndexValue);
  assert(CV);
  return CV->getValue().getZExtValue();
}

/// Get the start and end bit offsets of the storage unit containing
/// \p MemberTy.
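/// For example (a worked case consistent with the checks below, not a layout
/// taken from any real kernel): with a 4-byte record alignment
/// (AlignBits = 32), a bitfield occupying bits [18, 28) of its record gets
/// StartBitOffset = 18 & ~31 = 0 and EndBitOffset = 32, i.e. the whole 32-bit
/// storage unit that contains the bitfield.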
void BPFAbstractMemberAccess::GetStorageBitRange(DIDerivedType *MemberTy,
                                                 Align RecordAlignment,
                                                 uint32_t &StartBitOffset,
                                                 uint32_t &EndBitOffset) {
  uint32_t MemberBitSize = MemberTy->getSizeInBits();
  uint32_t MemberBitOffset = MemberTy->getOffsetInBits();
  uint32_t AlignBits = RecordAlignment.value() * 8;
  if (RecordAlignment > 8 || MemberBitSize > AlignBits)
    report_fatal_error("Unsupported field expression for llvm.bpf.preserve.field.info, "
                       "requiring too big alignment");

  StartBitOffset = MemberBitOffset & ~(AlignBits - 1);
  if ((StartBitOffset + AlignBits) < (MemberBitOffset + MemberBitSize))
    report_fatal_error("Unsupported field expression for llvm.bpf.preserve.field.info, "
                       "cross alignment boundary");
  EndBitOffset = StartBitOffset + AlignBits;
}

uint32_t BPFAbstractMemberAccess::GetFieldInfo(uint32_t InfoKind,
                                               DICompositeType *CTy,
                                               uint32_t AccessIndex,
                                               uint32_t PatchImm,
                                               Align RecordAlignment) {
  if (InfoKind == BPFCoreSharedInfo::FIELD_EXISTENCE)
    return 1;

  uint32_t Tag = CTy->getTag();
  if (InfoKind == BPFCoreSharedInfo::FIELD_BYTE_OFFSET) {
    if (Tag == dwarf::DW_TAG_array_type) {
      auto *EltTy = stripQualifiers(CTy->getBaseType());
      PatchImm += AccessIndex * calcArraySize(CTy, 1) *
                  (EltTy->getSizeInBits() >> 3);
    } else if (Tag == dwarf::DW_TAG_structure_type) {
      auto *MemberTy = cast<DIDerivedType>(CTy->getElements()[AccessIndex]);
      if (!MemberTy->isBitField()) {
        PatchImm += MemberTy->getOffsetInBits() >> 3;
      } else {
        unsigned SBitOffset, NextSBitOffset;
        GetStorageBitRange(MemberTy, RecordAlignment, SBitOffset,
                           NextSBitOffset);
        PatchImm += SBitOffset >> 3;
      }
    }
    return PatchImm;
  }

  if (InfoKind == BPFCoreSharedInfo::FIELD_BYTE_SIZE) {
    if (Tag == dwarf::DW_TAG_array_type) {
      auto *EltTy = stripQualifiers(CTy->getBaseType());
      return calcArraySize(CTy, 1) * (EltTy->getSizeInBits() >> 3);
    } else {
      auto *MemberTy = cast<DIDerivedType>(CTy->getElements()[AccessIndex]);
      uint32_t SizeInBits = MemberTy->getSizeInBits();
      if (!MemberTy->isBitField())
        return SizeInBits >> 3;

      unsigned SBitOffset, NextSBitOffset;
      GetStorageBitRange(MemberTy, RecordAlignment, SBitOffset, NextSBitOffset);
      SizeInBits = NextSBitOffset - SBitOffset;
      if (SizeInBits & (SizeInBits - 1))
        report_fatal_error("Unsupported field expression for llvm.bpf.preserve.field.info");
      return SizeInBits >> 3;
    }
  }

  if (InfoKind == BPFCoreSharedInfo::FIELD_SIGNEDNESS) {
    const DIType *BaseTy;
    if (Tag == dwarf::DW_TAG_array_type) {
      // Signedness is only checked when the final array element is accessed.
      if (CTy->getElements().size() != 1)
        report_fatal_error("Invalid array expression for llvm.bpf.preserve.field.info");
      BaseTy = stripQualifiers(CTy->getBaseType());
    } else {
      auto *MemberTy = cast<DIDerivedType>(CTy->getElements()[AccessIndex]);
      BaseTy = stripQualifiers(MemberTy->getBaseType());
    }

    // Only basic types and enum types have signedness.
    const auto *BTy = dyn_cast<DIBasicType>(BaseTy);
    while (!BTy) {
      const auto *CompTy = dyn_cast<DICompositeType>(BaseTy);
      // Report an error if the field expression does not have signedness.
      if (!CompTy || CompTy->getTag() != dwarf::DW_TAG_enumeration_type)
        report_fatal_error("Invalid field expression for llvm.bpf.preserve.field.info");
      BaseTy = stripQualifiers(CompTy->getBaseType());
      BTy = dyn_cast<DIBasicType>(BaseTy);
    }
    uint32_t Encoding = BTy->getEncoding();
    return (Encoding == dwarf::DW_ATE_signed ||
            Encoding == dwarf::DW_ATE_signed_char);
  }

  if (InfoKind == BPFCoreSharedInfo::FIELD_LSHIFT_U64) {
    // The value is loaded into a value with FIELD_BYTE_SIZE size,
    // and then zero- or sign-extended to U64.
    // FIELD_LSHIFT_U64 and FIELD_RSHIFT_U64 are the operations
    // used to extract the original value.
    const Triple &Triple = TM->getTargetTriple();
    DIDerivedType *MemberTy = nullptr;
    bool IsBitField = false;
    uint32_t SizeInBits;

    if (Tag == dwarf::DW_TAG_array_type) {
      auto *EltTy = stripQualifiers(CTy->getBaseType());
      SizeInBits = calcArraySize(CTy, 1) * EltTy->getSizeInBits();
    } else {
      MemberTy = cast<DIDerivedType>(CTy->getElements()[AccessIndex]);
      SizeInBits = MemberTy->getSizeInBits();
      IsBitField = MemberTy->isBitField();
    }

    if (!IsBitField) {
      if (SizeInBits > 64)
        report_fatal_error("too big field size for llvm.bpf.preserve.field.info");
      return 64 - SizeInBits;
    }

    unsigned SBitOffset, NextSBitOffset;
    GetStorageBitRange(MemberTy, RecordAlignment, SBitOffset, NextSBitOffset);
    if (NextSBitOffset - SBitOffset > 64)
      report_fatal_error("too big field size for llvm.bpf.preserve.field.info");

    unsigned OffsetInBits = MemberTy->getOffsetInBits();
    if (Triple.getArch() == Triple::bpfel)
      return SBitOffset + 64 - OffsetInBits - SizeInBits;
    else
      return OffsetInBits + 64 - NextSBitOffset;
  }

  if (InfoKind == BPFCoreSharedInfo::FIELD_RSHIFT_U64) {
    DIDerivedType *MemberTy = nullptr;
    bool IsBitField = false;
    uint32_t SizeInBits;
    if (Tag == dwarf::DW_TAG_array_type) {
      auto *EltTy = stripQualifiers(CTy->getBaseType());
      SizeInBits = calcArraySize(CTy, 1) * EltTy->getSizeInBits();
    } else {
      MemberTy = cast<DIDerivedType>(CTy->getElements()[AccessIndex]);
      SizeInBits = MemberTy->getSizeInBits();
      IsBitField = MemberTy->isBitField();
    }

    if (!IsBitField) {
      if (SizeInBits > 64)
        report_fatal_error("too big field size for llvm.bpf.preserve.field.info");
      return 64 - SizeInBits;
    }

    unsigned SBitOffset, NextSBitOffset;
    GetStorageBitRange(MemberTy, RecordAlignment, SBitOffset, NextSBitOffset);
    if (NextSBitOffset - SBitOffset > 64)
      report_fatal_error("too big field size for llvm.bpf.preserve.field.info");

    return 64 - SizeInBits;
  }

  llvm_unreachable("Unknown llvm.bpf.preserve.field.info info kind");
}

bool BPFAbstractMemberAccess::HasPreserveFieldInfoCall(
    CallInfoStack &CallStack) {
  // This is called on the error return path; there is no need to
  // maintain CallStack.
  while (CallStack.size()) {
    auto StackElem = CallStack.top();
    if (StackElem.second.Kind == BPFPreserveFieldInfoAI)
      return true;
    CallStack.pop();
  }
  return false;
}

/// Compute the base of the whole preserve_* intrinsics chains, i.e., the base
/// pointer of the first preserve_*_access_index call, and construct the access
/// string, which will be the name of a global variable.
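/// For example, an access string such as "llvm.sk_buff:0:50$0:0:0:2:0" (the
/// name also used in the transformGEPChain() comments below) encodes the type
/// name "sk_buff", relocation kind 0 (FIELD_BYTE_OFFSET here), the initially
/// patched immediate 50, and the access index chain "0:0:0:2:0" starting from
/// the base pointer.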
Value *BPFAbstractMemberAccess::computeBaseAndAccessKey(CallInst *Call,
                                                        CallInfo &CInfo,
                                                        std::string &AccessKey,
                                                        MDNode *&TypeMeta) {
  Value *Base = nullptr;
  std::string TypeName;
  CallInfoStack CallStack;

  // Put the access chain into a stack with the top as the head of the chain.
  while (Call) {
    CallStack.push(std::make_pair(Call, CInfo));
    CInfo = AIChain[Call].second;
    Call = AIChain[Call].first;
  }

  // The access offset from the base of the head of the chain is also
  // calculated here, as all debuginfo types are available.

  // Get the type name and calculate the first index.
  // We only want to get the type name from a typedef, structure or union.
  // If the user wants a relocation like
  //   int *p; ... __builtin_preserve_access_index(&p[4]) ...
  // or
  //   int a[10][20]; ... __builtin_preserve_access_index(&a[2][3]) ...
  // we will skip them.
  uint32_t FirstIndex = 0;
  uint32_t PatchImm = 0; // AccessOffset or the requested field info
  uint32_t InfoKind = BPFCoreSharedInfo::FIELD_BYTE_OFFSET;
  while (CallStack.size()) {
    auto StackElem = CallStack.top();
    Call = StackElem.first;
    CInfo = StackElem.second;

    if (!Base)
      Base = CInfo.Base;

    DIType *PossibleTypeDef = stripQualifiers(cast<DIType>(CInfo.Metadata),
                                              false);
    DIType *Ty = stripQualifiers(PossibleTypeDef);
    if (CInfo.Kind == BPFPreserveUnionAI ||
        CInfo.Kind == BPFPreserveStructAI) {
      // Struct or union type. If the typedef is in the metadata, always
      // use the typedef.
      TypeName = std::string(PossibleTypeDef->getName());
      TypeMeta = PossibleTypeDef;
      PatchImm += FirstIndex * (Ty->getSizeInBits() >> 3);
      break;
    }

    assert(CInfo.Kind == BPFPreserveArrayAI);

    // Array entries are always consumed to accumulate the initial index.
    CallStack.pop();

    // BPFPreserveArrayAI
    uint64_t AccessIndex = CInfo.AccessIndex;

    DIType *BaseTy = nullptr;
    bool CheckElemType = false;
    if (const auto *CTy = dyn_cast<DICompositeType>(Ty)) {
      // Array type
      assert(CTy->getTag() == dwarf::DW_TAG_array_type);

      FirstIndex += AccessIndex * calcArraySize(CTy, 1);
      BaseTy = stripQualifiers(CTy->getBaseType());
      CheckElemType = CTy->getElements().size() == 1;
    } else {
      // Pointer type
      auto *DTy = cast<DIDerivedType>(Ty);
      assert(DTy->getTag() == dwarf::DW_TAG_pointer_type);

      BaseTy = stripQualifiers(DTy->getBaseType());
      CTy = dyn_cast<DICompositeType>(BaseTy);
      if (!CTy) {
        CheckElemType = true;
      } else if (CTy->getTag() != dwarf::DW_TAG_array_type) {
        FirstIndex += AccessIndex;
        CheckElemType = true;
      } else {
        FirstIndex += AccessIndex * calcArraySize(CTy, 0);
      }
    }

    if (CheckElemType) {
      auto *CTy = dyn_cast<DICompositeType>(BaseTy);
      if (!CTy) {
        if (HasPreserveFieldInfoCall(CallStack))
          report_fatal_error("Invalid field access for llvm.preserve.field.info intrinsic");
        return nullptr;
      }

      unsigned CTag = CTy->getTag();
      if (CTag == dwarf::DW_TAG_structure_type ||
          CTag == dwarf::DW_TAG_union_type) {
        TypeName = std::string(CTy->getName());
      } else {
        if (HasPreserveFieldInfoCall(CallStack))
          report_fatal_error("Invalid field access for llvm.preserve.field.info intrinsic");
        return nullptr;
      }
      TypeMeta = CTy;
      PatchImm += FirstIndex * (CTy->getSizeInBits() >> 3);
      break;
    }
  }
  assert(TypeName.size());
  AccessKey += std::to_string(FirstIndex);

  // Traverse the rest of the access chain to complete the offset calculation
  // and access key construction.
  while (CallStack.size()) {
    auto StackElem = CallStack.top();
    CInfo = StackElem.second;
    CallStack.pop();

    if (CInfo.Kind == BPFPreserveFieldInfoAI) {
      InfoKind = CInfo.AccessIndex;
      if (InfoKind == BPFCoreSharedInfo::FIELD_EXISTENCE)
        PatchImm = 1;
      break;
    }

    // If the next Call (the top of the stack) is a BPFPreserveFieldInfoAI,
    // the action will be extracting field info.
    if (CallStack.size()) {
      auto StackElem2 = CallStack.top();
      CallInfo CInfo2 = StackElem2.second;
      if (CInfo2.Kind == BPFPreserveFieldInfoAI) {
        InfoKind = CInfo2.AccessIndex;
        assert(CallStack.size() == 1);
      }
    }

    // Access Index
    uint64_t AccessIndex = CInfo.AccessIndex;
    AccessKey += ":" + std::to_string(AccessIndex);

    MDNode *MDN = CInfo.Metadata;
    // At this stage, it cannot be a pointer type.
    auto *CTy = cast<DICompositeType>(stripQualifiers(cast<DIType>(MDN)));
    PatchImm = GetFieldInfo(InfoKind, CTy, AccessIndex, PatchImm,
                            CInfo.RecordAlignment);
  }

  // The access key is
  //   "llvm." + type name + ":" + reloc type + ":" + patched imm + "$" +
  //   access string,
  // uniquely identifying one relocation.
  // The prefix "llvm." indicates this is a temporary global, which should
  // not be emitted to the ELF file.
  AccessKey = "llvm." + TypeName + ":" + std::to_string(InfoKind) + ":" +
              std::to_string(PatchImm) + "$" + AccessKey;

  return Base;
}

MDNode *BPFAbstractMemberAccess::computeAccessKey(CallInst *Call,
                                                  CallInfo &CInfo,
                                                  std::string &AccessKey,
                                                  bool &IsInt32Ret) {
  DIType *Ty = stripQualifiers(cast<DIType>(CInfo.Metadata), false);
  assert(!Ty->getName().empty());

  int64_t PatchImm;
  std::string AccessStr("0");
  if (CInfo.AccessIndex == BPFCoreSharedInfo::TYPE_EXISTENCE) {
    PatchImm = 1;
  } else if (CInfo.AccessIndex == BPFCoreSharedInfo::TYPE_SIZE) {
    // A typedef debuginfo type has size 0; get the eventual base type.
    DIType *BaseTy = stripQualifiers(Ty, true);
    PatchImm = BaseTy->getSizeInBits() / 8;
  } else {
    // ENUM_VALUE_EXISTENCE and ENUM_VALUE
    IsInt32Ret = false;

    const auto *CE = cast<ConstantExpr>(Call->getArgOperand(1));
    const GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
    assert(GV->hasInitializer());
    const ConstantDataArray *DA = cast<ConstantDataArray>(GV->getInitializer());
    assert(DA->isString());
    StringRef ValueStr = DA->getAsString();

    // ValueStr format: <EnumeratorStr>:<Value>
    size_t Separator = ValueStr.find_first_of(':');
    StringRef EnumeratorStr = ValueStr.substr(0, Separator);

    // Find the enumerator index in the debuginfo.
    DIType *BaseTy = stripQualifiers(Ty, true);
    const auto *CTy = cast<DICompositeType>(BaseTy);
    assert(CTy->getTag() == dwarf::DW_TAG_enumeration_type);
    int EnumIndex = 0;
    for (const auto Element : CTy->getElements()) {
      const auto *Enum = cast<DIEnumerator>(Element);
      if (Enum->getName() == EnumeratorStr) {
        AccessStr = std::to_string(EnumIndex);
        break;
      }
      EnumIndex++;
    }

    if (CInfo.AccessIndex == BPFCoreSharedInfo::ENUM_VALUE) {
      StringRef EValueStr = ValueStr.substr(Separator + 1);
      PatchImm = std::stoll(std::string(EValueStr));
    } else {
      PatchImm = 1;
    }
  }

  AccessKey = "llvm." + Ty->getName().str() + ":" +
              std::to_string(CInfo.AccessIndex) + std::string(":") +
              std::to_string(PatchImm) + std::string("$") + AccessStr;

  return Ty;
}

/// Call/Kind is the base preserve_*_access_index() call. Attempt to transform
/// it into a chain of relocatable GEPs.
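/// For a field-info call (CInfo.Kind == BPFPreserveFieldInfoAI), the call is
/// conceptually replaced by a load of an external relocation global plus a
/// passthrough, e.g. (illustrative IR only; the global name, mangled intrinsic
/// suffix and value names are invented):
///   @"llvm.s:1:4$0:1" = external global i32
///   ...
///   %info = load i32, i32* @"llvm.s:1:4$0:1"
///   %res = call i32 @llvm.bpf.passthrough.i32.i32(i32 <seq_num>, i32 %info)
/// The global also carries BPFCoreSharedInfo::AmaAttr and the
/// !llvm.preserve.access.index metadata so that later passes can emit the
/// corresponding CO-RE relocation. The non-field-info case is shown in the
/// comments in the body below.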
bool BPFAbstractMemberAccess::transformGEPChain(CallInst *Call,
                                                CallInfo &CInfo) {
  std::string AccessKey;
  MDNode *TypeMeta;
  Value *Base = nullptr;
  bool IsInt32Ret;

  IsInt32Ret = CInfo.Kind == BPFPreserveFieldInfoAI;
  if (CInfo.Kind == BPFPreserveFieldInfoAI && CInfo.Metadata) {
    TypeMeta = computeAccessKey(Call, CInfo, AccessKey, IsInt32Ret);
  } else {
    Base = computeBaseAndAccessKey(Call, CInfo, AccessKey, TypeMeta);
    if (!Base)
      return false;
  }

  BasicBlock *BB = Call->getParent();
  GlobalVariable *GV;

  if (GEPGlobals.find(AccessKey) == GEPGlobals.end()) {
    IntegerType *VarType;
    if (IsInt32Ret)
      VarType = Type::getInt32Ty(BB->getContext()); // 32bit return value
    else
      VarType = Type::getInt64Ty(BB->getContext()); // 64bit ptr or enum value

    GV = new GlobalVariable(*M, VarType, false,
                            GlobalVariable::ExternalLinkage, nullptr,
                            AccessKey);
    GV->addAttribute(BPFCoreSharedInfo::AmaAttr);
    GV->setMetadata(LLVMContext::MD_preserve_access_index, TypeMeta);
    GEPGlobals[AccessKey] = GV;
  } else {
    GV = GEPGlobals[AccessKey];
  }

  if (CInfo.Kind == BPFPreserveFieldInfoAI) {
    // Load the global variable which represents the returned field info.
    LoadInst *LDInst;
    if (IsInt32Ret)
      LDInst = new LoadInst(Type::getInt32Ty(BB->getContext()), GV, "", Call);
    else
      LDInst = new LoadInst(Type::getInt64Ty(BB->getContext()), GV, "", Call);

    Instruction *PassThroughInst =
        BPFCoreSharedInfo::insertPassThrough(M, BB, LDInst, Call);
    Call->replaceAllUsesWith(PassThroughInst);
    Call->eraseFromParent();
    return true;
  }

  // For any original GEP Call and Base %2 like
  //   %4 = bitcast %struct.net_device** %dev1 to i64*
  // it is transformed to:
  //   %6 = load llvm.sk_buff:0:50$0:0:0:2:0
  //   %7 = bitcast %struct.sk_buff* %2 to i8*
  //   %8 = getelementptr i8, i8* %7, %6
  //   %9 = bitcast i8* %8 to i64*
  // using %9 instead of %4. The original Call inst is removed.

  // Load the global variable.
  auto *LDInst = new LoadInst(Type::getInt64Ty(BB->getContext()), GV, "", Call);

  // Generate a BitCast.
  auto *BCInst = new BitCastInst(Base, Type::getInt8PtrTy(BB->getContext()));
  BB->getInstList().insert(Call->getIterator(), BCInst);

  // Generate a GetElementPtr.
  auto *GEP = GetElementPtrInst::Create(Type::getInt8Ty(BB->getContext()),
                                        BCInst, LDInst);
  BB->getInstList().insert(Call->getIterator(), GEP);

  // Generate a BitCast.
  auto *BCInst2 = new BitCastInst(GEP, Call->getType());
  BB->getInstList().insert(Call->getIterator(), BCInst2);

  // For the following code,
  //   Block0:
  //     ...
  //     if (...) goto Block1 else ...
  //   Block1:
  //     %6 = load llvm.sk_buff:0:50$0:0:0:2:0
  //     %7 = bitcast %struct.sk_buff* %2 to i8*
  //     %8 = getelementptr i8, i8* %7, %6
  //     ...
  //     goto CommonExit
  //   Block2:
  //     ...
  //     if (...) goto Block3 else ...
  //   Block3:
  //     %6 = load llvm.bpf_map:0:40$0:0:0:2:0
  //     %7 = bitcast %struct.sk_buff* %2 to i8*
  //     %8 = getelementptr i8, i8* %7, %6
  //     ...
  //     goto CommonExit
  //   CommonExit
  // SimplifyCFG may generate:
  //   Block0:
  //     ...
  //     if (...) goto Block_Common else ...
  //   Block2:
  //     ...
  //     if (...) goto Block_Common else ...
  //   Block_Common:
  //     PHI = [llvm.sk_buff:0:50$0:0:0:2:0, llvm.bpf_map:0:40$0:0:0:2:0]
  //     %6 = load PHI
  //     %7 = bitcast %struct.sk_buff* %2 to i8*
  //     %8 = getelementptr i8, i8* %7, %6
  //     ...
  //     goto CommonExit
  // For the above code, we cannot perform proper relocation since
  // "load PHI" has two possible relocations.
  //
  // To prevent the above tail merging, we use __builtin_bpf_passthrough(),
  // where one of its parameters is a seq_num. Since two
  // __builtin_bpf_passthrough() calls will always have different seq_nums,
  // tail merging cannot happen. The __builtin_bpf_passthrough() calls will be
  // removed at the beginning of the Target IR passes.
  //
  // This approach is also used in other places where a global variable
  // representing a relocation is used.
  Instruction *PassThroughInst =
      BPFCoreSharedInfo::insertPassThrough(M, BB, BCInst2, Call);
  Call->replaceAllUsesWith(PassThroughInst);
  Call->eraseFromParent();

  return true;
}

bool BPFAbstractMemberAccess::doTransformation(Function &F) {
  bool Transformed = false;

  // Collect PreserveDIAccessIndex intrinsic call chains.
  // The call chains will be used to generate access
  // patterns similar to GEPs.
  collectAICallChains(F);

  for (auto &C : BaseAICalls)
    Transformed = transformGEPChain(C.first, C.second) || Transformed;

  return removePreserveAccessIndexIntrinsic(F) || Transformed;
}

PreservedAnalyses
BPFAbstractMemberAccessPass::run(Function &F, FunctionAnalysisManager &AM) {
  return BPFAbstractMemberAccess(TM).run(F) ? PreservedAnalyses::none()
                                            : PreservedAnalyses::all();
}