//===- ARMParallelDSP.cpp - Parallel DSP Pass -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Armv6 introduced instructions to perform 32-bit SIMD operations. The
/// purpose of this pass is to do some IR pattern matching to create ACLE
/// DSP intrinsics, which map onto these 32-bit SIMD operations.
/// This pass runs only when unaligned accesses are supported/enabled.
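///
/// For example, a multiply of two sign-extended i16 loads whose result is
/// accumulated:
///
///   %sext0 = sext i16 %ld0 to i32
///   %sext1 = sext i16 %ld1 to i32
///   %mul   = mul i32 %sext0, %sext1
///   %acc   = add i32 %mul, %acc0
///
/// can, once a second such multiply is paired with it, be replaced by a call
/// to the @llvm.arm.smlad intrinsic (a sketch; see MatchSMLAD below for the
/// full pattern).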
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Pass.h"
#include "llvm/PassRegistry.h"
#include "llvm/PassSupport.h"
#include "llvm/Support/Debug.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "ARM.h"
#include "ARMSubtarget.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "arm-parallel-dsp"

STATISTIC(NumSMLAD, "Number of smlad instructions generated");

static cl::opt<bool>
DisableParallelDSP("disable-arm-parallel-dsp", cl::Hidden, cl::init(false),
                   cl::desc("Disable the ARM Parallel DSP pass"));
namespace {
  struct MulCandidate;
  class Reduction;

  using MulCandList = SmallVector<std::unique_ptr<MulCandidate>, 8>;
  using MemInstList = SmallVectorImpl<LoadInst*>;
  using MulPairList = SmallVector<std::pair<MulCandidate*, MulCandidate*>, 8>;

  // 'MulCandidate' holds the multiplication instructions that are candidates
  // for parallel execution.
  struct MulCandidate {
    Instruction   *Root;
    Value         *LHS;
    Value         *RHS;
    bool          Exchange = false;
    bool          ReadOnly = true;
    SmallVector<LoadInst*, 2> VecLd;    // Container for loads to widen.

    MulCandidate(Instruction *I, Value *lhs, Value *rhs) :
      Root(I), LHS(lhs), RHS(rhs) { }

    bool HasTwoLoadInputs() const {
      return isa<LoadInst>(LHS) && isa<LoadInst>(RHS);
    }

    LoadInst *getBaseLoad() const {
      return VecLd.front();
    }
  };

  /// Represent a sequence of multiply-accumulate operations with the aim of
  /// performing the multiplications in parallel.
  class Reduction {
    Instruction     *Root = nullptr;
    Value           *Acc = nullptr;
    MulCandList     Muls;
    MulPairList     MulPairs;
    SmallPtrSet<Instruction*, 4> Adds;

  public:
    Reduction() = delete;

    Reduction(Instruction *Add) : Root(Add) { }

    /// Record an Add instruction that is part of this reduction.
    void InsertAdd(Instruction *I) { Adds.insert(I); }

    /// Record a MulCandidate, rooted at a Mul instruction, that is part of
    /// this reduction.
    void InsertMul(Instruction *I, Value *LHS, Value *RHS) {
      Muls.push_back(std::make_unique<MulCandidate>(I, LHS, RHS));
    }

    /// Add the incoming accumulator value; returns true if a value had not
    /// already been added. Returning false signals to the user that this
    /// reduction already has a value to initialise the accumulator.
    bool InsertAcc(Value *V) {
      if (Acc)
        return false;
      Acc = V;
      return true;
    }

    /// Record two MulCandidates, rooted at muls, that can be executed as a
    /// single parallel operation.
    void AddMulPair(MulCandidate *Mul0, MulCandidate *Mul1) {
      MulPairs.push_back(std::make_pair(Mul0, Mul1));
    }

    /// Return true if enough mul operations are found that can be executed in
    /// parallel.
    bool CreateParallelPairs();

    /// Return the add instruction which is the root of the reduction.
    Instruction *getRoot() { return Root; }

    bool is64Bit() const { return Root->getType()->isIntegerTy(64); }

    /// Return the incoming value to be accumulated. This may be null.
    Value *getAccumulator() { return Acc; }

    /// Return the set of adds that comprise the reduction.
    SmallPtrSetImpl<Instruction*> &getAdds() { return Adds; }

    /// Return the MulCandidates, rooted at mul instructions, that comprise
    /// the reduction.
    MulCandList &getMuls() { return Muls; }

    /// Return the MulCandidates, rooted at mul instructions, that have been
    /// paired for parallel execution.
    MulPairList &getMulPairs() { return MulPairs; }

    /// To finalise, replace the uses of the root with the intrinsic call.
    void UpdateRoot(Instruction *SMLAD) {
      Root->replaceAllUsesWith(SMLAD);
    }
  };

  class WidenedLoad {
    LoadInst *NewLd = nullptr;
    SmallVector<LoadInst*, 4> Loads;

  public:
    WidenedLoad(SmallVectorImpl<LoadInst*> &Lds, LoadInst *Wide)
      : NewLd(Wide) {
      for (auto *I : Lds)
        Loads.push_back(I);
    }
    LoadInst *getLoad() {
      return NewLd;
    }
  };

  class ARMParallelDSP : public FunctionPass {
    ScalarEvolution   *SE;
    AliasAnalysis     *AA;
    TargetLibraryInfo *TLI;
    DominatorTree     *DT;
    const DataLayout  *DL;
    Module            *M;
    std::map<LoadInst*, LoadInst*> LoadPairs;
    SmallPtrSet<LoadInst*, 4> OffsetLoads;
    std::map<LoadInst*, std::unique_ptr<WidenedLoad>> WideLoads;

    template<unsigned>
    bool IsNarrowSequence(Value *V, Value *&Src);

    bool RecordMemoryOps(BasicBlock *BB);
    void InsertParallelMACs(Reduction &Reduction);
    bool AreSequentialLoads(LoadInst *Ld0, LoadInst *Ld1, MemInstList &VecMem);
    LoadInst *CreateWideLoad(MemInstList &Loads, IntegerType *LoadTy);
    bool CreateParallelPairs(Reduction &R);

    /// Try to match and generate: SMLAD, SMLADX - Signed Multiply Accumulate
    /// Dual performs two signed 16x16-bit multiplications. It adds the
    /// products to a 32-bit accumulate operand. Optionally, the instruction
    /// can exchange the halfwords of the second operand before performing the
    /// arithmetic.
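    ///
    /// A matched reduction is rewritten as a call such as (a sketch):
    ///   %res = call i32 @llvm.arm.smlad(i32 %pair0, i32 %pair1, i32 %acc)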
    bool MatchSMLAD(Function &F);

  public:
    static char ID;

    ARMParallelDSP() : FunctionPass(ID) { }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      FunctionPass::getAnalysisUsage(AU);
      AU.addRequired<AssumptionCacheTracker>();
      AU.addRequired<ScalarEvolutionWrapperPass>();
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<TargetPassConfig>();
      AU.addPreserved<ScalarEvolutionWrapperPass>();
      AU.addPreserved<GlobalsAAWrapperPass>();
      AU.setPreservesCFG();
    }

    bool runOnFunction(Function &F) override {
      if (DisableParallelDSP)
        return false;
      if (skipFunction(F))
        return false;

      SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
      AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
      TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
      DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
      auto &TPC = getAnalysis<TargetPassConfig>();

      M = F.getParent();
      DL = &M->getDataLayout();

      auto &TM = TPC.getTM<TargetMachine>();
      auto *ST = &TM.getSubtarget<ARMSubtarget>(F);

      if (!ST->allowsUnalignedMem()) {
        LLVM_DEBUG(dbgs() << "Unaligned memory access not supported: not "
                             "running pass ARMParallelDSP\n");
        return false;
      }

      if (!ST->hasDSP()) {
        LLVM_DEBUG(dbgs() << "DSP extension not enabled: not running pass "
                             "ARMParallelDSP\n");
        return false;
      }

      if (!ST->isLittle()) {
        LLVM_DEBUG(dbgs() << "Only supporting little endian: not running pass "
                             "ARMParallelDSP\n");
        return false;
      }

      LLVM_DEBUG(dbgs() << "\n== Parallel DSP pass ==\n");
      LLVM_DEBUG(dbgs() << " - " << F.getName() << "\n\n");

      return MatchSMLAD(F);
    }
  };
} // namespace

template<typename MemInst>
static bool AreSequentialAccesses(MemInst *MemOp0, MemInst *MemOp1,
                                  const DataLayout &DL, ScalarEvolution &SE) {
  return isConsecutiveAccess(MemOp0, MemOp1, DL, SE);
}

bool ARMParallelDSP::AreSequentialLoads(LoadInst *Ld0, LoadInst *Ld1,
                                        MemInstList &VecMem) {
  if (!Ld0 || !Ld1)
    return false;

  if (!LoadPairs.count(Ld0) || LoadPairs[Ld0] != Ld1)
    return false;

  LLVM_DEBUG(dbgs() << "Loads are sequential and valid:\n";
    dbgs() << "Ld0:"; Ld0->dump();
    dbgs() << "Ld1:"; Ld1->dump();
  );

  VecMem.clear();
  VecMem.push_back(Ld0);
  VecMem.push_back(Ld1);
  return true;
}

// MaxBitWidth: the maximum supported bitwidth of the elements in the DSP
// instructions, which is set to 16. So here we should collect all i8 and i16
// narrow operations.
// TODO: we currently only collect i16, and will support i8 later, so that's
// why we check that types are equal to MaxBitWidth, and not <= MaxBitWidth.
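//
// For example, with MaxBitWidth = 16, this matches (a sketch):
//   %ld = load i16, i16* %addr   ; %ld must be recorded as pairable
//   %s  = sext i16 %ld to i32
// and returns the load through Src.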
template<unsigned MaxBitWidth>
bool ARMParallelDSP::IsNarrowSequence(Value *V, Value *&Src) {
  if (auto *SExt = dyn_cast<SExtInst>(V)) {
    if (SExt->getSrcTy()->getIntegerBitWidth() != MaxBitWidth)
      return false;

    if (auto *Ld = dyn_cast<LoadInst>(SExt->getOperand(0))) {
      // Check that this load could be paired.
      if (!LoadPairs.count(Ld) && !OffsetLoads.count(Ld))
        return false;

      Src = Ld;
      return true;
    }
  }
  return false;
}

/// Iterate through the block and record base, offset pairs of loads which can
/// be widened into a single load.
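///
/// For example (a sketch), these two sign-extended i16 loads access
/// consecutive addresses, so they are recorded as a (base, offset) pair:
///   %ld0 = load i16, i16* %ptr
///   %gep = getelementptr i16, i16* %ptr, i32 1
///   %ld1 = load i16, i16* %gep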
bool ARMParallelDSP::RecordMemoryOps(BasicBlock *BB) {
  SmallVector<LoadInst*, 8> Loads;
  SmallVector<Instruction*, 8> Writes;
  LoadPairs.clear();
  WideLoads.clear();

  // Collect loads and instructions that may write to memory. For now we only
  // record loads which are simple, sign-extended and have a single user.
  // TODO: Allow zero-extended loads.
  for (auto &I : *BB) {
    if (I.mayWriteToMemory())
      Writes.push_back(&I);
    auto *Ld = dyn_cast<LoadInst>(&I);
    if (!Ld || !Ld->isSimple() ||
        !Ld->hasOneUse() || !isa<SExtInst>(Ld->user_back()))
      continue;
    Loads.push_back(Ld);
  }

  using InstSet = std::set<Instruction*>;
  using DepMap = std::map<Instruction*, InstSet>;
  DepMap RAWDeps;

  // Record any writes that may alias a load.
  const auto Size = LocationSize::unknown();
  for (auto *Read : Loads) {
    for (auto *Write : Writes) {
      MemoryLocation ReadLoc =
        MemoryLocation(Read->getPointerOperand(), Size);

      if (!isModOrRefSet(intersectModRef(AA->getModRefInfo(Write, ReadLoc),
          ModRefInfo::ModRef)))
        continue;
      if (DT->dominates(Write, Read))
        RAWDeps[Read].insert(Write);
    }
  }

  // Check that there isn't a write between the two loads which would prevent
  // them from being safely merged.
  auto SafeToPair = [&](LoadInst *Base, LoadInst *Offset) {
    LoadInst *Dominator = DT->dominates(Base, Offset) ? Base : Offset;
    LoadInst *Dominated = DT->dominates(Base, Offset) ? Offset : Base;

    if (RAWDeps.count(Dominated)) {
      InstSet &WritesBefore = RAWDeps[Dominated];

      for (auto *Before : WritesBefore) {
        // We can't move the second load backwards, past a write, to merge
        // with the first load.
        if (DT->dominates(Dominator, Before))
          return false;
      }
    }
    return true;
  };

  // Record base, offset load pairs.
  for (auto *Base : Loads) {
    for (auto *Offset : Loads) {
      if (Base == Offset)
        continue;

      if (AreSequentialAccesses<LoadInst>(Base, Offset, *DL, *SE) &&
          SafeToPair(Base, Offset)) {
        LoadPairs[Base] = Offset;
        OffsetLoads.insert(Offset);
        break;
      }
    }
  }

  LLVM_DEBUG(if (!LoadPairs.empty()) {
               dbgs() << "Consecutive load pairs:\n";
               for (auto &MapIt : LoadPairs)
                 dbgs() << *MapIt.first << ", " << *MapIt.second << "\n";
             });
  return LoadPairs.size() > 1;
}

// The pass needs to identify integer add/sub reductions of 16-bit vector
// multiplications.
// To use SMLAD:
// 1) we first need to find an integer add, then look for this pattern:
//
// acc0 = ...
// ld0 = load i16
// sext0 = sext i16 %ld0 to i32
// ld1 = load i16
// sext1 = sext i16 %ld1 to i32
// mul0 = mul i32 %sext0, %sext1
// ld2 = load i16
// sext2 = sext i16 %ld2 to i32
// ld3 = load i16
// sext3 = sext i16 %ld3 to i32
// mul1 = mul i32 %sext2, %sext3
// add0 = add i32 %mul0, %acc0
// acc1 = add i32 %add0, %mul1
//
// Which can be selected to:
//
// ldr r0
// ldr r1
// smlad r2, r0, r1, r2
//
// If constants are used instead of loads, these will need to be hoisted
// out and into a register.
//
// If loop invariants are used instead of loads, these need to be packed
// before the loop begins.
//
bool ARMParallelDSP::MatchSMLAD(Function &F) {
  // Search recursively back through the operands to find a tree of values that
  // form a multiply-accumulate chain. The search records the Add and Mul
  // instructions that form the reduction and allows us to find a single value
  // to be used as the initial input to the accumulator.
  std::function<bool(Value*, BasicBlock*, Reduction&)> Search = [&]
    (Value *V, BasicBlock *BB, Reduction &R) -> bool {

    // If we find a non-instruction, try to use it as the initial accumulator
    // value. This may have already been found during the search, in which case
    // this function will return false, signalling a failed search.
    auto *I = dyn_cast<Instruction>(V);
    if (!I)
      return R.InsertAcc(V);

    if (I->getParent() != BB)
      return false;

    switch (I->getOpcode()) {
    default:
      break;
    case Instruction::PHI:
      // Could be the accumulator value.
      return R.InsertAcc(V);
    case Instruction::Add: {
      // Adds should be adding together two muls, or another add and a mul to
      // be within the mac chain. One of the operands may also be the
      // accumulator value, at which point we should stop searching.
      bool ValidLHS = Search(I->getOperand(0), BB, R);
      bool ValidRHS = Search(I->getOperand(1), BB, R);
      if (!ValidLHS && !ValidRHS)
        return false;
      else if (ValidLHS && ValidRHS) {
        R.InsertAdd(I);
        return true;
      } else {
        R.InsertAdd(I);
        return R.InsertAcc(I);
      }
    }
    case Instruction::Mul: {
      Value *MulOp0 = I->getOperand(0);
      Value *MulOp1 = I->getOperand(1);
      if (isa<SExtInst>(MulOp0) && isa<SExtInst>(MulOp1)) {
        Value *LHS = nullptr;
        Value *RHS = nullptr;
        if (IsNarrowSequence<16>(MulOp0, LHS) &&
            IsNarrowSequence<16>(MulOp1, RHS)) {
          R.InsertMul(I, LHS, RHS);
          return true;
        }
      }
      return false;
    }
    case Instruction::SExt:
      return Search(I->getOperand(0), BB, R);
    }
    return false;
  };

  bool Changed = false;

  for (auto &BB : F) {
    SmallPtrSet<Instruction*, 4> AllAdds;
    if (!RecordMemoryOps(&BB))
      continue;

    for (Instruction &I : reverse(BB)) {
      if (I.getOpcode() != Instruction::Add)
        continue;

      if (AllAdds.count(&I))
        continue;

      const auto *Ty = I.getType();
      if (!Ty->isIntegerTy(32) && !Ty->isIntegerTy(64))
        continue;

      Reduction R(&I);
      if (!Search(&I, &BB, R))
        continue;

      if (!CreateParallelPairs(R))
        continue;

      InsertParallelMACs(R);
      Changed = true;
      AllAdds.insert(R.getAdds().begin(), R.getAdds().end());
    }
  }

  return Changed;
}

bool ARMParallelDSP::CreateParallelPairs(Reduction &R) {
  // Not enough mul operations to make a pair.
  if (R.getMuls().size() < 2)
    return false;

  // Check that the muls operate directly upon sign-extended loads.
  for (auto &MulCand : R.getMuls()) {
    if (!MulCand->HasTwoLoadInputs())
      return false;
  }

  auto CanPair = [&](Reduction &R, MulCandidate *PMul0, MulCandidate *PMul1) {
    // The first elements of each vector should be loads with sexts. If we
    // find that they are two pairs of consecutive loads, then these can be
    // transformed into two wider loads and the users can be replaced with
    // DSP intrinsics.
    auto *Ld0 = static_cast<LoadInst*>(PMul0->LHS);
    auto *Ld1 = static_cast<LoadInst*>(PMul1->LHS);
    auto *Ld2 = static_cast<LoadInst*>(PMul0->RHS);
    auto *Ld3 = static_cast<LoadInst*>(PMul1->RHS);

    LLVM_DEBUG(dbgs() << "Loads:\n"
               << " - " << *Ld0 << "\n"
               << " - " << *Ld1 << "\n"
               << " - " << *Ld2 << "\n"
               << " - " << *Ld3 << "\n");

    if (AreSequentialLoads(Ld0, Ld1, PMul0->VecLd)) {
      if (AreSequentialLoads(Ld2, Ld3, PMul1->VecLd)) {
        LLVM_DEBUG(dbgs() << "OK: found two pairs of parallel loads!\n");
        R.AddMulPair(PMul0, PMul1);
        return true;
      } else if (AreSequentialLoads(Ld3, Ld2, PMul1->VecLd)) {
        LLVM_DEBUG(dbgs() << "OK: found two pairs of parallel loads!\n");
        LLVM_DEBUG(dbgs() << "    exchanging Ld2 and Ld3\n");
        PMul1->Exchange = true;
        R.AddMulPair(PMul0, PMul1);
        return true;
      }
    } else if (AreSequentialLoads(Ld1, Ld0, PMul0->VecLd) &&
               AreSequentialLoads(Ld2, Ld3, PMul1->VecLd)) {
      LLVM_DEBUG(dbgs() << "OK: found two pairs of parallel loads!\n");
      LLVM_DEBUG(dbgs() << "    exchanging Ld0 and Ld1\n");
      LLVM_DEBUG(dbgs() << "    and swapping muls\n");
      PMul0->Exchange = true;
      // Only the second operand can be exchanged, so swap the muls.
      R.AddMulPair(PMul1, PMul0);
      return true;
    }
    return false;
  };

  MulCandList &Muls = R.getMuls();
  const unsigned Elems = Muls.size();
  SmallPtrSet<const Instruction*, 4> Paired;
  for (unsigned i = 0; i < Elems; ++i) {
    MulCandidate *PMul0 = Muls[i].get();
    if (Paired.count(PMul0->Root))
      continue;

    for (unsigned j = 0; j < Elems; ++j) {
      if (i == j)
        continue;

      MulCandidate *PMul1 = Muls[j].get();
      if (Paired.count(PMul1->Root))
        continue;

      const Instruction *Mul0 = PMul0->Root;
      const Instruction *Mul1 = PMul1->Root;
      if (Mul0 == Mul1)
        continue;

      assert(PMul0 != PMul1 && "expected different chains");

      if (CanPair(R, PMul0, PMul1)) {
        Paired.insert(Mul0);
        Paired.insert(Mul1);
        break;
      }
    }
  }
  return !R.getMulPairs().empty();
}

void ARMParallelDSP::InsertParallelMACs(Reduction &R) {
  auto CreateSMLAD = [&](LoadInst *WideLd0, LoadInst *WideLd1,
                         Value *Acc, bool Exchange,
                         Instruction *InsertAfter) {
    // Replace the reduction chain with an intrinsic call.
    Value *Args[] = { WideLd0, WideLd1, Acc };
    Function *SMLAD = nullptr;
    if (Exchange)
      SMLAD = Acc->getType()->isIntegerTy(32) ?
        Intrinsic::getDeclaration(M, Intrinsic::arm_smladx) :
        Intrinsic::getDeclaration(M, Intrinsic::arm_smlaldx);
    else
      SMLAD = Acc->getType()->isIntegerTy(32) ?
        Intrinsic::getDeclaration(M, Intrinsic::arm_smlad) :
        Intrinsic::getDeclaration(M, Intrinsic::arm_smlald);

    IRBuilder<NoFolder> Builder(InsertAfter->getParent(),
                                ++BasicBlock::iterator(InsertAfter));
    Instruction *Call = Builder.CreateCall(SMLAD, Args);
    ++NumSMLAD;
    return Call;
  };
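  // With a 32-bit accumulator, the call created above looks like (a sketch):
  //   %acc1 = call i32 @llvm.arm.smlad(i32 %wide0, i32 %wide1, i32 %acc0)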

  Instruction *InsertAfter = R.getRoot();
  Value *Acc = R.getAccumulator();
  if (!Acc)
    Acc = ConstantInt::get(IntegerType::get(M->getContext(), 32), 0);

  IntegerType *Ty = IntegerType::get(M->getContext(), 32);
  LLVM_DEBUG(dbgs() << "Root: " << *InsertAfter << "\n"
             << "Acc: " << *Acc << "\n");
  for (auto &Pair : R.getMulPairs()) {
    MulCandidate *LHSMul = Pair.first;
    MulCandidate *RHSMul = Pair.second;
    LLVM_DEBUG(dbgs() << "Muls:\n"
               << "- " << *LHSMul->Root << "\n"
               << "- " << *RHSMul->Root << "\n");
    LoadInst *BaseLHS = LHSMul->getBaseLoad();
    LoadInst *BaseRHS = RHSMul->getBaseLoad();
    LoadInst *WideLHS = WideLoads.count(BaseLHS) ?
      WideLoads[BaseLHS]->getLoad() : CreateWideLoad(LHSMul->VecLd, Ty);
    LoadInst *WideRHS = WideLoads.count(BaseRHS) ?
      WideLoads[BaseRHS]->getLoad() : CreateWideLoad(RHSMul->VecLd, Ty);

    Acc = CreateSMLAD(WideLHS, WideRHS, Acc, RHSMul->Exchange, InsertAfter);
    InsertAfter = cast<Instruction>(Acc);
  }
  R.UpdateRoot(cast<Instruction>(Acc));
}

LoadInst *ARMParallelDSP::CreateWideLoad(MemInstList &Loads,
                                         IntegerType *LoadTy) {
  assert(Loads.size() == 2 && "currently only support widening two loads");

  LoadInst *Base = Loads[0];
  LoadInst *Offset = Loads[1];

  Instruction *BaseSExt = dyn_cast<SExtInst>(Base->user_back());
  Instruction *OffsetSExt = dyn_cast<SExtInst>(Offset->user_back());

  assert((BaseSExt && OffsetSExt) &&
         "Loads should have a single, extending user");

  std::function<void(Value*, Value*)> MoveBefore =
    [&](Value *A, Value *B) -> void {
      if (!isa<Instruction>(A) || !isa<Instruction>(B))
        return;

      auto *Source = cast<Instruction>(A);
      auto *Sink = cast<Instruction>(B);

      if (DT->dominates(Source, Sink) ||
          Source->getParent() != Sink->getParent() ||
          isa<PHINode>(Source) || isa<PHINode>(Sink))
        return;

      Source->moveBefore(Sink);
      for (auto &Op : Source->operands())
        MoveBefore(Op, Source);
    };

  // Insert the load at the point of the original dominating load.
  LoadInst *DomLoad = DT->dominates(Base, Offset) ? Base : Offset;
  IRBuilder<NoFolder> IRB(DomLoad->getParent(),
                          ++BasicBlock::iterator(DomLoad));

  // Bitcast the pointer to a wider type and create the wide load, while
  // making sure to maintain the original alignment, as this prevents an ldrd
  // from being generated when it would be illegal due to memory alignment.
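  // For example, two i16 loads with align 2 become one i32 load that keeps
  // align 2 rather than taking on the i32 type's natural alignment of 4.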
  const unsigned AddrSpace = DomLoad->getPointerAddressSpace();
  Value *VecPtr = IRB.CreateBitCast(Base->getPointerOperand(),
                                    LoadTy->getPointerTo(AddrSpace));
  LoadInst *WideLoad = IRB.CreateAlignedLoad(LoadTy, VecPtr,
                                             Base->getAlignment());

  // Make sure everything is in the correct order in the basic block.
  MoveBefore(Base->getPointerOperand(), VecPtr);
  MoveBefore(VecPtr, WideLoad);

  // From the wide load, create two values that equal the original two loads.
  // Loads[0] needs a trunc, while Loads[1] needs an lshr and a trunc.
  // TODO: Support big-endian as well.
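  //
  // For a pair of i16 loads, the result looks like (a sketch, little-endian):
  //   %wide = load i32, i32* %vecptr, align 2
  //   %bot  = trunc i32 %wide to i16        ; replaces Loads[0]
  //   %shr  = lshr i32 %wide, 16
  //   %top  = trunc i32 %shr to i16         ; replaces Loads[1]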
  Value *Bottom = IRB.CreateTrunc(WideLoad, Base->getType());
  Value *NewBaseSExt = IRB.CreateSExt(Bottom, BaseSExt->getType());
  BaseSExt->replaceAllUsesWith(NewBaseSExt);

  IntegerType *OffsetTy = cast<IntegerType>(Offset->getType());
  Value *ShiftVal = ConstantInt::get(LoadTy, OffsetTy->getBitWidth());
  Value *Top = IRB.CreateLShr(WideLoad, ShiftVal);
  Value *Trunc = IRB.CreateTrunc(Top, OffsetTy);
  Value *NewOffsetSExt = IRB.CreateSExt(Trunc, OffsetSExt->getType());
  OffsetSExt->replaceAllUsesWith(NewOffsetSExt);

  WideLoads.emplace(Base, std::make_unique<WidenedLoad>(Loads, WideLoad));
  return WideLoad;
}

Pass *llvm::createARMParallelDSPPass() {
  return new ARMParallelDSP();
}

char ARMParallelDSP::ID = 0;

INITIALIZE_PASS_BEGIN(ARMParallelDSP, "arm-parallel-dsp",
                      "Transform functions to use DSP intrinsics", false, false)
INITIALIZE_PASS_END(ARMParallelDSP, "arm-parallel-dsp",
                    "Transform functions to use DSP intrinsics", false, false)