1 //== MemoryTaggingSupport.cpp - helpers for memory tagging implementations ===//
2 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
3 // See https://llvm.org/LICENSE.txt for license information.
4 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5 //
6 //===----------------------------------------------------------------------===//
7 //
8 // This file declares common infrastructure for HWAddressSanitizer and
9 // Aarch64StackTagging.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "llvm/Transforms/Utils/MemoryTaggingSupport.h"
14
15 #include "llvm/Analysis/CFG.h"
16 #include "llvm/Analysis/PostDominators.h"
17 #include "llvm/Analysis/ValueTracking.h"
18 #include "llvm/IR/BasicBlock.h"
19 #include "llvm/IR/IntrinsicInst.h"
20
21 namespace llvm {
22 namespace memtag {
23 namespace {
maybeReachableFromEachOther(const SmallVectorImpl<IntrinsicInst * > & Insts,const DominatorTree * DT,const LoopInfo * LI,size_t MaxLifetimes)24 bool maybeReachableFromEachOther(const SmallVectorImpl<IntrinsicInst *> &Insts,
25 const DominatorTree *DT, const LoopInfo *LI,
26 size_t MaxLifetimes) {
27 // If we have too many lifetime ends, give up, as the algorithm below is N^2.
28 if (Insts.size() > MaxLifetimes)
29 return true;
30 for (size_t I = 0; I < Insts.size(); ++I) {
31 for (size_t J = 0; J < Insts.size(); ++J) {
32 if (I == J)
33 continue;
34 if (isPotentiallyReachable(Insts[I], Insts[J], nullptr, DT, LI))
35 return true;
36 }
37 }
38 return false;
39 }
40 } // namespace
41
// For the lifetime interval beginning at Start and ending at each instruction
// in Ends, invoke Callback at every point where untagging code should be
// inserted. Returns true if untagging was emitted at the lifetime ends (the
// caller may keep its lifetime.end intrinsics), and false if it was emitted at
// the reachable function exits instead — in that case untagging may occur
// outside the original lifetime interval, so the caller must drop the
// lifetime.end calls for this alloca.
bool forAllReachableExits(const DominatorTree &DT, const PostDominatorTree &PDT,
                          const LoopInfo &LI, const Instruction *Start,
                          const SmallVectorImpl<IntrinsicInst *> &Ends,
                          const SmallVectorImpl<Instruction *> &RetVec,
                          llvm::function_ref<void(Instruction *)> Callback) {
  // Fast path: a single end that post-dominates the start is executed on
  // every path leaving Start, so it alone covers all exits.
  if (Ends.size() == 1 && PDT.dominates(Ends[0], Start)) {
    Callback(Ends[0]);
    return true;
  }
  // Blocks containing a lifetime end; used both as a quick same-block test
  // and as the exclusion set for the reachability query below.
  SmallPtrSet<BasicBlock *, 2> EndBlocks;
  for (auto *End : Ends) {
    EndBlocks.insert(End->getParent());
  }
  // Collect function exits reachable from Start, counting how many of them
  // are "covered", i.e. guaranteed to execute a lifetime end first.
  SmallVector<Instruction *, 8> ReachableRetVec;
  unsigned NumCoveredExits = 0;
  for (auto *RI : RetVec) {
    if (!isPotentiallyReachable(Start, RI, nullptr, &DT, &LI))
      continue;
    ReachableRetVec.push_back(RI);
    // If there is an end in the same basic block as the return, we know for
    // sure that the return is covered. Otherwise, we can check whether there
    // is a way to reach the RI from the start of the lifetime without passing
    // through an end (EndBlocks acts as the set of excluded blocks).
    if (EndBlocks.count(RI->getParent()) > 0 ||
        !isPotentiallyReachable(Start, RI, &EndBlocks, &DT, &LI)) {
      ++NumCoveredExits;
    }
  }
  // If there's a mix of covered and non-covered exits, just put the untag
  // on exits, so we avoid the redundancy of untagging twice.
  if (NumCoveredExits == ReachableRetVec.size()) {
    // Every reachable exit is covered: untag at the lifetime ends.
    for (auto *End : Ends)
      Callback(End);
  } else {
    // Otherwise untag at each reachable exit instead.
    for (auto *RI : ReachableRetVec)
      Callback(RI);
    // We may have inserted untag outside of the lifetime interval.
    // Signal the caller to remove the lifetime end call for this alloca.
    return false;
  }
  return true;
}
84
isStandardLifetime(const SmallVectorImpl<IntrinsicInst * > & LifetimeStart,const SmallVectorImpl<IntrinsicInst * > & LifetimeEnd,const DominatorTree * DT,const LoopInfo * LI,size_t MaxLifetimes)85 bool isStandardLifetime(const SmallVectorImpl<IntrinsicInst *> &LifetimeStart,
86 const SmallVectorImpl<IntrinsicInst *> &LifetimeEnd,
87 const DominatorTree *DT, const LoopInfo *LI,
88 size_t MaxLifetimes) {
89 // An alloca that has exactly one start and end in every possible execution.
90 // If it has multiple ends, they have to be unreachable from each other, so
91 // at most one of them is actually used for each execution of the function.
92 return LifetimeStart.size() == 1 &&
93 (LifetimeEnd.size() == 1 ||
94 (LifetimeEnd.size() > 0 &&
95 !maybeReachableFromEachOther(LifetimeEnd, DT, LI, MaxLifetimes)));
96 }
97
getUntagLocationIfFunctionExit(Instruction & Inst)98 Instruction *getUntagLocationIfFunctionExit(Instruction &Inst) {
99 if (isa<ReturnInst>(Inst)) {
100 if (CallInst *CI = Inst.getParent()->getTerminatingMustTailCall())
101 return CI;
102 return &Inst;
103 }
104 if (isa<ResumeInst, CleanupReturnInst>(Inst)) {
105 return &Inst;
106 }
107 return nullptr;
108 }
109
// Accumulates per-instruction facts into Info: calls that may return twice,
// interesting allocas, their lifetime markers and debug intrinsics, and the
// function-exit points where untagging must be inserted.
void StackInfoBuilder::visit(Instruction &Inst) {
  // Record setjmp-like calls; note this does not return early, so such a
  // call still falls through to the checks below.
  if (CallInst *CI = dyn_cast<CallInst>(&Inst)) {
    if (CI->canReturnTwice()) {
      Info.CallsReturnTwice = true;
    }
  }
  if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
    if (IsInterestingAlloca(*AI)) {
      // operator[] default-constructs the entry on first sight of this alloca.
      Info.AllocasToInstrument[AI].AI = AI;
    }
    return;
  }
  auto *II = dyn_cast<IntrinsicInst>(&Inst);
  if (II && (II->getIntrinsicID() == Intrinsic::lifetime_start ||
             II->getIntrinsicID() == Intrinsic::lifetime_end)) {
    // Operand 1 of a lifetime intrinsic is the pointer operand.
    AllocaInst *AI = findAllocaForValue(II->getArgOperand(1));
    if (!AI) {
      // Lifetime marker we cannot attribute to a single alloca; recorded so
      // callers can bail out of lifetime-based instrumentation.
      Info.UnrecognizedLifetimes.push_back(&Inst);
      return;
    }
    if (!IsInterestingAlloca(*AI))
      return;
    if (II->getIntrinsicID() == Intrinsic::lifetime_start)
      Info.AllocasToInstrument[AI].LifetimeStart.push_back(II);
    else
      Info.AllocasToInstrument[AI].LifetimeEnd.push_back(II);
    return;
  }
  if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&Inst)) {
    // Remember debug intrinsics referring to interesting allocas, so they
    // can be updated when the alloca is later replaced.
    for (Value *V : DVI->location_ops()) {
      if (auto *AI = dyn_cast_or_null<AllocaInst>(V)) {
        if (!IsInterestingAlloca(*AI))
          continue;
        AllocaInfo &AInfo = Info.AllocasToInstrument[AI];
        auto &DVIVec = AInfo.DbgVariableIntrinsics;
        // One intrinsic may reference the same alloca through several
        // location operands; dedupe against the last recorded entry.
        if (DVIVec.empty() || DVIVec.back() != DVI)
          DVIVec.push_back(DVI);
      }
    }
  }
  // Finally, note function exits (returns / EH exits) as untag locations.
  Instruction *ExitUntag = getUntagLocationIfFunctionExit(Inst);
  if (ExitUntag)
    Info.RetVec.push_back(ExitUntag);
}
154
getAllocaSizeInBytes(const AllocaInst & AI)155 uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {
156 auto DL = AI.getModule()->getDataLayout();
157 return *AI.getAllocationSizeInBits(DL) / 8;
158 }
159
// Raises the alignment of Info.AI to at least Alignment and, if rounding the
// size up to Alignment changes it, replaces the alloca with a padded one of
// the aligned size. Info.AI is updated to point at the replacement; all users
// of the old alloca are redirected through a bitcast to the original type.
void alignAndPadAlloca(memtag::AllocaInfo &Info, llvm::Align Alignment) {
  const Align NewAlignment = std::max(Info.AI->getAlign(), Alignment);
  Info.AI->setAlignment(NewAlignment);
  auto &Ctx = Info.AI->getFunction()->getContext();

  uint64_t Size = getAllocaSizeInBytes(*Info.AI);
  uint64_t AlignedSize = alignTo(Size, Alignment);
  // Already a multiple of the requested alignment: no padding needed.
  if (Size == AlignedSize)
    return;

  // Add padding to the alloca.
  // For array allocations, fold the constant element count into an ArrayType
  // so the padded struct below describes the entire allocation.
  Type *AllocatedType =
      Info.AI->isArrayAllocation()
          ? ArrayType::get(
                Info.AI->getAllocatedType(),
                cast<ConstantInt>(Info.AI->getArraySize())->getZExtValue())
          : Info.AI->getAllocatedType();
  Type *PaddingType = ArrayType::get(Type::getInt8Ty(Ctx), AlignedSize - Size);
  Type *TypeWithPadding = StructType::get(AllocatedType, PaddingType);
  // Insert the padded replacement right before the original alloca.
  auto *NewAI =
      new AllocaInst(TypeWithPadding, Info.AI->getType()->getAddressSpace(),
                     nullptr, "", Info.AI);
  NewAI->takeName(Info.AI);
  NewAI->setAlignment(Info.AI->getAlign());
  NewAI->setUsedWithInAlloca(Info.AI->isUsedWithInAlloca());
  NewAI->setSwiftError(Info.AI->isSwiftError());
  NewAI->copyMetadata(*Info.AI);

  // Cast back to the original pointer type so existing users see no change,
  // then swap the allocas and record the replacement in Info.
  auto *NewPtr = new BitCastInst(NewAI, Info.AI->getType(), "", Info.AI);
  Info.AI->replaceAllUsesWith(NewPtr);
  Info.AI->eraseFromParent();
  Info.AI = NewAI;
}
193
194 } // namespace memtag
195 } // namespace llvm
196