//===- ARMParallelDSP.cpp - Parallel DSP Pass -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Armv6 introduced instructions to perform 32-bit SIMD operations. The
/// purpose of this pass is to do some IR pattern matching to create ACLE
/// DSP intrinsics, which map onto these 32-bit SIMD operations.
/// This pass runs only when unaligned accesses are supported/enabled.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Pass.h"
#include "llvm/PassRegistry.h"
#include "llvm/PassSupport.h"
#include "llvm/Support/Debug.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "ARM.h"
#include "ARMSubtarget.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "arm-parallel-dsp"

STATISTIC(NumSMLAD, "Number of smlad instructions generated");

static cl::opt<bool>
DisableParallelDSP("disable-arm-parallel-dsp", cl::Hidden, cl::init(false),
                   cl::desc("Disable the ARM Parallel DSP pass"));
namespace {
  struct MulCandidate;
  class Reduction;

  using MulCandList     = SmallVector<std::unique_ptr<MulCandidate>, 8>;
  using ReductionList   = SmallVector<Reduction, 8>;
  using ValueList       = SmallVector<Value*, 8>;
  using MemInstList     = SmallVector<LoadInst*, 8>;
  using PMACPair        = std::pair<MulCandidate*, MulCandidate*>;
  using PMACPairList    = SmallVector<PMACPair, 8>;
  // A 'MulCandidate' holds a multiplication instruction that is a candidate
  // for parallel execution, along with its narrow load operands.
  struct MulCandidate {
    Instruction   *Root;
    MemInstList   VecLd;    // Container for loads to widen.
    Value         *LHS;
    Value         *RHS;
    bool          Exchange = false;
    bool          ReadOnly = true;

    MulCandidate(Instruction *I, ValueList &lhs, ValueList &rhs) :
      Root(I), LHS(lhs.front()), RHS(rhs.front()) { }

    bool HasTwoLoadInputs() const {
      return isa<LoadInst>(LHS) && isa<LoadInst>(RHS);
    }
  };

  /// Represent a sequence of multiply-accumulate operations with the aim of
  /// performing the multiplications in parallel.
  class Reduction {
    Instruction     *Root = nullptr;
    Value           *Acc = nullptr;
    MulCandList     Muls;
    PMACPairList    MulPairs;
    SmallPtrSet<Instruction*, 4> Adds;

  public:
    Reduction() = delete;

    Reduction(Instruction *Add) : Root(Add) { }

    /// Record an Add instruction that is part of this reduction.
    void InsertAdd(Instruction *I) { Adds.insert(I); }

    /// Record a MulCandidate, rooted at a Mul instruction, that is part of
    /// this reduction.
    void InsertMul(Instruction *I, ValueList &LHS, ValueList &RHS) {
      Muls.push_back(make_unique<MulCandidate>(I, LHS, RHS));
    }

    /// Record the incoming accumulator value; return true if a value has not
    /// already been recorded. Returning false signals to the user that this
    /// reduction already has a value to initialise the accumulator.
    bool InsertAcc(Value *V) {
      if (Acc)
        return false;
      Acc = V;
      return true;
    }

    /// Record two MulCandidates, rooted at muls, that can be executed as a
    /// single parallel operation.
    void AddMulPair(MulCandidate *Mul0, MulCandidate *Mul1) {
      MulPairs.push_back(std::make_pair(Mul0, Mul1));
    }

    /// Return the add instruction which is the root of the reduction.
    Instruction *getRoot() { return Root; }

    /// Return the incoming value to be accumulated. This may be null.
    Value *getAccumulator() { return Acc; }

    /// Return the set of adds that comprise the reduction.
    SmallPtrSetImpl<Instruction*> &getAdds() { return Adds; }

    /// Return the MulCandidates, rooted at mul instructions, that comprise
    /// the reduction.
    MulCandList &getMuls() { return Muls; }

    /// Return the MulCandidate pairs that have been chosen for parallel
    /// execution.
    PMACPairList &getMulPairs() { return MulPairs; }

    /// To finalise, replace the uses of the root with the intrinsic call.
    void UpdateRoot(Instruction *SMLAD) {
      Root->replaceAllUsesWith(SMLAD);
    }
  };

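  /// Holds the two narrow, sequential loads that have been combined, along
  /// with the wide load that replaces them.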
  class WidenedLoad {
    LoadInst *NewLd = nullptr;
    SmallVector<LoadInst*, 4> Loads;

  public:
    WidenedLoad(SmallVectorImpl<LoadInst*> &Lds, LoadInst *Wide)
      : NewLd(Wide) {
      for (auto *I : Lds)
        Loads.push_back(I);
    }
    LoadInst *getLoad() {
      return NewLd;
    }
  };

  class ARMParallelDSP : public FunctionPass {
    ScalarEvolution   *SE;
    AliasAnalysis     *AA;
    TargetLibraryInfo *TLI;
    DominatorTree     *DT;
    const DataLayout  *DL;
    Module            *M;
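    // The [base, offset] pairs of sequential loads that are candidates for
    // widening, the loads that act as the offset of a pair, and the wide
    // loads that have already been created.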
    std::map<LoadInst*, LoadInst*> LoadPairs;
    SmallPtrSet<LoadInst*, 4> OffsetLoads;
    std::map<LoadInst*, std::unique_ptr<WidenedLoad>> WideLoads;

    template<unsigned>
    bool IsNarrowSequence(Value *V, ValueList &VL);

    bool RecordMemoryOps(BasicBlock *BB);
    void InsertParallelMACs(Reduction &R);
    bool AreSequentialLoads(LoadInst *Ld0, LoadInst *Ld1, MemInstList &VecMem);
    LoadInst* CreateWideLoad(SmallVectorImpl<LoadInst*> &Loads,
                             IntegerType *LoadTy);
    bool CreateParallelPairs(Reduction &R);

    /// Try to match and generate: SMLAD, SMLADX - Signed Multiply Accumulate
    /// Dual performs two signed 16x16-bit multiplications. It adds the
    /// products to a 32-bit accumulate operand. Optionally, the instruction
    /// can exchange the halfwords of the second operand before performing the
    /// arithmetic.
    bool MatchSMLAD(Function &F);

  public:
    static char ID;

    ARMParallelDSP() : FunctionPass(ID) { }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      FunctionPass::getAnalysisUsage(AU);
      AU.addRequired<AssumptionCacheTracker>();
      AU.addRequired<ScalarEvolutionWrapperPass>();
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<TargetPassConfig>();
      AU.addPreserved<ScalarEvolutionWrapperPass>();
      AU.addPreserved<GlobalsAAWrapperPass>();
      AU.setPreservesCFG();
    }

    bool runOnFunction(Function &F) override {
      if (DisableParallelDSP)
        return false;
      if (skipFunction(F))
        return false;

      SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
      AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
      TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
      DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
      auto &TPC = getAnalysis<TargetPassConfig>();

      M = F.getParent();
      DL = &M->getDataLayout();

      auto &TM = TPC.getTM<TargetMachine>();
      auto *ST = &TM.getSubtarget<ARMSubtarget>(F);

      if (!ST->allowsUnalignedMem()) {
        LLVM_DEBUG(dbgs() << "Unaligned memory access not supported: not "
                             "running pass ARMParallelDSP\n");
        return false;
      }

      if (!ST->hasDSP()) {
        LLVM_DEBUG(dbgs() << "DSP extension not enabled: not running pass "
                             "ARMParallelDSP\n");
        return false;
      }

      if (!ST->isLittle()) {
        LLVM_DEBUG(dbgs() << "Only supporting little endian: not running pass "
                          << "ARMParallelDSP\n");
        return false;
      }

      LLVM_DEBUG(dbgs() << "\n== Parallel DSP pass ==\n");
      LLVM_DEBUG(dbgs() << " - " << F.getName() << "\n\n");

      return MatchSMLAD(F);
    }
  };
}

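// Check, using ScalarEvolution, that MemOp1 directly follows MemOp0 in
// memory.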
template<typename MemInst>
static bool AreSequentialAccesses(MemInst *MemOp0, MemInst *MemOp1,
                                  const DataLayout &DL, ScalarEvolution &SE) {
  return isConsecutiveAccess(MemOp0, MemOp1, DL, SE);
}

bool ARMParallelDSP::AreSequentialLoads(LoadInst *Ld0, LoadInst *Ld1,
                                        MemInstList &VecMem) {
  if (!Ld0 || !Ld1)
    return false;

  if (!LoadPairs.count(Ld0) || LoadPairs[Ld0] != Ld1)
    return false;

  LLVM_DEBUG(dbgs() << "Loads are sequential and valid:\n";
             dbgs() << "Ld0:"; Ld0->dump();
             dbgs() << "Ld1:"; Ld1->dump(););

  VecMem.clear();
  VecMem.push_back(Ld0);
  VecMem.push_back(Ld1);
  return true;
}

// MaxBitWidth: the maximum supported bitwidth of the elements in the DSP
// instructions, which is set to 16. So here we should collect all i8 and i16
// narrow operations.
// TODO: we currently only collect i16, and will support i8 later, so that's
// why we check that types are equal to MaxBitWidth, and not <= MaxBitWidth.
template<unsigned MaxBitWidth>
bool ARMParallelDSP::IsNarrowSequence(Value *V, ValueList &VL) {
  if (auto *SExt = dyn_cast<SExtInst>(V)) {
    if (SExt->getSrcTy()->getIntegerBitWidth() != MaxBitWidth)
      return false;

    if (auto *Ld = dyn_cast<LoadInst>(SExt->getOperand(0))) {
      // Check that this load could be paired.
      if (!LoadPairs.count(Ld) && !OffsetLoads.count(Ld))
        return false;

      VL.push_back(Ld);
      VL.push_back(SExt);
      return true;
    }
  }
  return false;
}

/// Iterate through the block and record base, offset pairs of loads which can
/// be widened into a single load.
bool ARMParallelDSP::RecordMemoryOps(BasicBlock *BB) {
  SmallVector<LoadInst*, 8> Loads;
  SmallVector<Instruction*, 8> Writes;
  LoadPairs.clear();
  WideLoads.clear();

  // Collect loads and instructions that may write to memory. For now we only
  // record loads which are simple, sign-extended and have a single user.
  // TODO: Allow zero-extended loads.
  for (auto &I : *BB) {
    if (I.mayWriteToMemory())
      Writes.push_back(&I);
    auto *Ld = dyn_cast<LoadInst>(&I);
    if (!Ld || !Ld->isSimple() ||
        !Ld->hasOneUse() || !isa<SExtInst>(Ld->user_back()))
      continue;
    Loads.push_back(Ld);
  }

  using InstSet = std::set<Instruction*>;
  using DepMap = std::map<Instruction*, InstSet>;
  DepMap RAWDeps;

  // Record any writes that may alias a load.
  const auto Size = LocationSize::unknown();
  for (auto Read : Loads) {
    for (auto Write : Writes) {
      MemoryLocation ReadLoc =
        MemoryLocation(Read->getPointerOperand(), Size);

      if (!isModOrRefSet(intersectModRef(AA->getModRefInfo(Write, ReadLoc),
                                         ModRefInfo::ModRef)))
        continue;
      if (DT->dominates(Write, Read))
        RAWDeps[Read].insert(Write);
    }
  }

  // Check that there is no write between the two loads which would prevent
  // them from being safely merged.
  auto SafeToPair = [&](LoadInst *Base, LoadInst *Offset) {
    LoadInst *Dominator = DT->dominates(Base, Offset) ? Base : Offset;
    LoadInst *Dominated = DT->dominates(Base, Offset) ? Offset : Base;

    if (RAWDeps.count(Dominated)) {
      InstSet &WritesBefore = RAWDeps[Dominated];

      for (auto Before : WritesBefore) {
        // We can't move the second load backward, past a write, to merge
        // with the first load.
        if (DT->dominates(Dominator, Before))
          return false;
      }
    }
    return true;
  };

  // Record base, offset load pairs.
  for (auto *Base : Loads) {
    for (auto *Offset : Loads) {
      if (Base == Offset)
        continue;

      if (AreSequentialAccesses<LoadInst>(Base, Offset, *DL, *SE) &&
          SafeToPair(Base, Offset)) {
        LoadPairs[Base] = Offset;
        OffsetLoads.insert(Offset);
        break;
      }
    }
  }

  LLVM_DEBUG(if (!LoadPairs.empty()) {
               dbgs() << "Consecutive load pairs:\n";
               for (auto &MapIt : LoadPairs)
                 dbgs() << *MapIt.first << ", " << *MapIt.second << "\n";
             });
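  // At least two load pairs are needed to feed a pair of parallel
  // multiplications.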
  return LoadPairs.size() > 1;
}

// The pass needs to identify integer add/sub reductions of 16-bit vector
// multiplications.
// To use SMLAD:
// 1) we first need to find an integer add and then look for this pattern:
//
// acc0 = ...
// ld0 = load i16
// sext0 = sext i16 %ld0 to i32
// ld1 = load i16
// sext1 = sext i16 %ld1 to i32
// mul0 = mul i32 %sext0, %sext1
// ld2 = load i16
// sext2 = sext i16 %ld2 to i32
// ld3 = load i16
// sext3 = sext i16 %ld3 to i32
// mul1 = mul i32 %sext2, %sext3
// add0 = add i32 %mul0, %acc0
// acc1 = add i32 %add0, %mul1
//
// which can be selected to:
//
// ldr r0
// ldr r1
// smlad r2, r0, r1, r2
//
// If constants are used instead of loads, these will need to be hoisted
// out and into a register.
//
// If loop invariants are used instead of loads, these need to be packed
// before the loop begins.
//
bool ARMParallelDSP::MatchSMLAD(Function &F) {
  // Search recursively back through the operands to find a tree of values that
  // form a multiply-accumulate chain. The search records the Add and Mul
  // instructions that form the reduction and allows us to find a single value
  // to be used as the initial input to the accumulator.
  std::function<bool(Value*, BasicBlock*, Reduction&)> Search = [&]
    (Value *V, BasicBlock *BB, Reduction &R) -> bool {

    // If we find a non-instruction, try to use it as the initial accumulator
    // value. This may have already been found during the search, in which case
    // this function will return false, signaling a search fail.
    auto *I = dyn_cast<Instruction>(V);
    if (!I)
      return R.InsertAcc(V);

    if (I->getParent() != BB)
      return false;

    switch (I->getOpcode()) {
    default:
      break;
    case Instruction::PHI:
      // Could be the accumulator value.
      return R.InsertAcc(V);
    case Instruction::Add: {
      // Adds should be adding together two muls, or another add and a mul to
      // be within the mac chain. One of the operands may also be the
      // accumulator value at which point we should stop searching.
      bool ValidLHS = Search(I->getOperand(0), BB, R);
      bool ValidRHS = Search(I->getOperand(1), BB, R);
      if (!ValidLHS && !ValidRHS)
        return false;
      if (ValidLHS && ValidRHS) {
        R.InsertAdd(I);
        return true;
      }
      // Only one operand is part of the chain, so this add may be using the
      // accumulator; record it and try to use the add itself as the
      // accumulator input.
      R.InsertAdd(I);
      return R.InsertAcc(I);
    }
    case Instruction::Mul: {
      Value *MulOp0 = I->getOperand(0);
      Value *MulOp1 = I->getOperand(1);
      if (isa<SExtInst>(MulOp0) && isa<SExtInst>(MulOp1)) {
        ValueList LHS;
        ValueList RHS;
        if (IsNarrowSequence<16>(MulOp0, LHS) &&
            IsNarrowSequence<16>(MulOp1, RHS)) {
          R.InsertMul(I, LHS, RHS);
          return true;
        }
      }
      return false;
    }
    case Instruction::SExt:
      return Search(I->getOperand(0), BB, R);
    }
    return false;
  };

  bool Changed = false;

  for (auto &BB : F) {
    SmallPtrSet<Instruction*, 4> AllAdds;
    if (!RecordMemoryOps(&BB))
      continue;

    for (Instruction &I : reverse(BB)) {
      if (I.getOpcode() != Instruction::Add)
        continue;

      if (AllAdds.count(&I))
        continue;

      const auto *Ty = I.getType();
      if (!Ty->isIntegerTy(32) && !Ty->isIntegerTy(64))
        continue;

      Reduction R(&I);
      if (!Search(&I, &BB, R))
        continue;

      if (!CreateParallelPairs(R))
        continue;

      InsertParallelMACs(R);
      Changed = true;
      AllAdds.insert(R.getAdds().begin(), R.getAdds().end());
    }
  }

  return Changed;
}

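/// Pair up the candidate multiplications so that each pair operates upon two
/// pairs of sequential loads, which can then be replaced by wide loads and a
/// DSP intrinsic. Return true if at least one pair is created.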
bool ARMParallelDSP::CreateParallelPairs(Reduction &R) {
  // Not enough mul operations to make a pair.
  if (R.getMuls().size() < 2)
    return false;

  // Check that the muls operate directly upon sign extended loads.
  for (auto &MulCand : R.getMuls()) {
    if (!MulCand->HasTwoLoadInputs())
      return false;
  }

  auto CanPair = [&](Reduction &R, MulCandidate *PMul0, MulCandidate *PMul1) {
    // The first elements of each vector should be loads with sexts. If we
    // find that these are two pairs of consecutive loads, then they can be
    // transformed into two wider loads and the users can be replaced with
    // DSP intrinsics.
    auto *Ld0 = cast<LoadInst>(PMul0->LHS);
    auto *Ld1 = cast<LoadInst>(PMul1->LHS);
    auto *Ld2 = cast<LoadInst>(PMul0->RHS);
    auto *Ld3 = cast<LoadInst>(PMul1->RHS);

    LLVM_DEBUG(dbgs() << "Loads:\n"
               << " - " << *Ld0 << "\n"
               << " - " << *Ld1 << "\n"
               << " - " << *Ld2 << "\n"
               << " - " << *Ld3 << "\n");

    if (AreSequentialLoads(Ld0, Ld1, PMul0->VecLd)) {
      if (AreSequentialLoads(Ld2, Ld3, PMul1->VecLd)) {
        LLVM_DEBUG(dbgs() << "OK: found two pairs of parallel loads!\n");
        R.AddMulPair(PMul0, PMul1);
        return true;
      } else if (AreSequentialLoads(Ld3, Ld2, PMul1->VecLd)) {
        LLVM_DEBUG(dbgs() << "OK: found two pairs of parallel loads!\n");
        LLVM_DEBUG(dbgs() << "    exchanging Ld2 and Ld3\n");
        PMul1->Exchange = true;
        R.AddMulPair(PMul0, PMul1);
        return true;
      }
    } else if (AreSequentialLoads(Ld1, Ld0, PMul0->VecLd) &&
               AreSequentialLoads(Ld2, Ld3, PMul1->VecLd)) {
      LLVM_DEBUG(dbgs() << "OK: found two pairs of parallel loads!\n");
      LLVM_DEBUG(dbgs() << "    exchanging Ld0 and Ld1\n");
      LLVM_DEBUG(dbgs() << "    and swapping muls\n");
      PMul0->Exchange = true;
      // Only the second operand can be exchanged, so swap the muls.
      R.AddMulPair(PMul1, PMul0);
      return true;
    }
    return false;
  };

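  // Greedily try to pair each mul candidate with another, not yet paired,
  // candidate.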
  MulCandList &Muls = R.getMuls();
  const unsigned Elems = Muls.size();
  SmallPtrSet<const Instruction*, 4> Paired;
  for (unsigned i = 0; i < Elems; ++i) {
    MulCandidate *PMul0 = Muls[i].get();
    if (Paired.count(PMul0->Root))
      continue;

    for (unsigned j = 0; j < Elems; ++j) {
      if (i == j)
        continue;

      MulCandidate *PMul1 = Muls[j].get();
      if (Paired.count(PMul1->Root))
        continue;

      const Instruction *Mul0 = PMul0->Root;
      const Instruction *Mul1 = PMul1->Root;
      if (Mul0 == Mul1)
        continue;

      assert(PMul0 != PMul1 && "expected different chains");

      if (CanPair(R, PMul0, PMul1)) {
        Paired.insert(Mul0);
        Paired.insert(Mul1);
        break;
      }
    }
  }
  return !R.getMulPairs().empty();
}

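/// For each mul pair, widen the two pairs of loads and insert a call to the
/// relevant smlad/smlald intrinsic, chaining the accumulator through the
/// calls before finally replacing the root of the reduction.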
void ARMParallelDSP::InsertParallelMACs(Reduction &R) {
  auto CreateSMLADCall = [&](SmallVectorImpl<LoadInst*> &VecLd0,
                             SmallVectorImpl<LoadInst*> &VecLd1,
                             Value *Acc, bool Exchange,
                             Instruction *InsertAfter) {
    // Replace the reduction chain with an intrinsic call.
    IntegerType *Ty = IntegerType::get(M->getContext(), 32);
    LoadInst *WideLd0 = WideLoads.count(VecLd0[0]) ?
      WideLoads[VecLd0[0]]->getLoad() : CreateWideLoad(VecLd0, Ty);
    LoadInst *WideLd1 = WideLoads.count(VecLd1[0]) ?
      WideLoads[VecLd1[0]]->getLoad() : CreateWideLoad(VecLd1, Ty);

    Value *Args[] = { WideLd0, WideLd1, Acc };
    Function *SMLAD = nullptr;
    if (Exchange)
      SMLAD = Acc->getType()->isIntegerTy(32) ?
        Intrinsic::getDeclaration(M, Intrinsic::arm_smladx) :
        Intrinsic::getDeclaration(M, Intrinsic::arm_smlaldx);
    else
      SMLAD = Acc->getType()->isIntegerTy(32) ?
        Intrinsic::getDeclaration(M, Intrinsic::arm_smlad) :
        Intrinsic::getDeclaration(M, Intrinsic::arm_smlald);

    IRBuilder<NoFolder> Builder(InsertAfter->getParent(),
                                ++BasicBlock::iterator(InsertAfter));
    Instruction *Call = Builder.CreateCall(SMLAD, Args);
    NumSMLAD++;
    return Call;
  };

  Instruction *InsertAfter = R.getRoot();
  Value *Acc = R.getAccumulator();
  if (!Acc)
    Acc = ConstantInt::get(IntegerType::get(M->getContext(), 32), 0);

  LLVM_DEBUG(dbgs() << "Root: " << *InsertAfter << "\n"
             << "Acc: " << *Acc << "\n");
  for (auto &Pair : R.getMulPairs()) {
    MulCandidate *PMul0 = Pair.first;
    MulCandidate *PMul1 = Pair.second;
    LLVM_DEBUG(dbgs() << "Muls:\n"
               << "- " << *PMul0->Root << "\n"
               << "- " << *PMul1->Root << "\n");

    Acc = CreateSMLADCall(PMul0->VecLd, PMul1->VecLd, Acc, PMul1->Exchange,
                          InsertAfter);
    InsertAfter = cast<Instruction>(Acc);
  }
  R.UpdateRoot(cast<Instruction>(Acc));
}

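/// Widen a pair of sequential, sign extended, loads into a single load of
/// twice the original bit width, then extract the two original values from
/// it. For a pair of i16 loads on a little-endian target, the result is
/// roughly:
///
///   %wide = load i32, i32* %base.cast, align 2
///   %bottom = trunc i32 %wide to i16          ; replaces the base load
///   %shifted = lshr i32 %wide, 16
///   %top = trunc i32 %shifted to i16          ; replaces the offset load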
LoadInst* ARMParallelDSP::CreateWideLoad(SmallVectorImpl<LoadInst*> &Loads,
                                         IntegerType *LoadTy) {
  assert(Loads.size() == 2 && "currently only support widening two loads");

  LoadInst *Base = Loads[0];
  LoadInst *Offset = Loads[1];

  Instruction *BaseSExt = dyn_cast<SExtInst>(Base->user_back());
  Instruction *OffsetSExt = dyn_cast<SExtInst>(Offset->user_back());

  assert((BaseSExt && OffsetSExt) &&
         "Loads should have a single, extending, user");

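  // Move A before B, unless A already dominates B, they are in different
  // blocks, or a phi is involved; then recurse into A's operands so that
  // everything the wide load depends upon is defined before it.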
  std::function<void(Value*, Value*)> MoveBefore =
    [&](Value *A, Value *B) -> void {
      if (!isa<Instruction>(A) || !isa<Instruction>(B))
        return;

      auto *Source = cast<Instruction>(A);
      auto *Sink = cast<Instruction>(B);

      if (DT->dominates(Source, Sink) ||
          Source->getParent() != Sink->getParent() ||
          isa<PHINode>(Source) || isa<PHINode>(Sink))
        return;

      Source->moveBefore(Sink);
      for (auto &Op : Source->operands())
        MoveBefore(Op, Source);
    };

  // Insert the load at the point of the original dominating load.
  LoadInst *DomLoad = DT->dominates(Base, Offset) ? Base : Offset;
  IRBuilder<NoFolder> IRB(DomLoad->getParent(),
                          ++BasicBlock::iterator(DomLoad));

  // Bitcast the pointer to a wider type and create the wide load, while making
  // sure to maintain the original alignment as this prevents ldrd from being
  // generated when it could be illegal due to memory alignment.
  const unsigned AddrSpace = DomLoad->getPointerAddressSpace();
  Value *VecPtr = IRB.CreateBitCast(Base->getPointerOperand(),
                                    LoadTy->getPointerTo(AddrSpace));
  LoadInst *WideLoad = IRB.CreateAlignedLoad(LoadTy, VecPtr,
                                             Base->getAlignment());

  // Make sure everything is in the correct order in the basic block.
  MoveBefore(Base->getPointerOperand(), VecPtr);
  MoveBefore(VecPtr, WideLoad);

  // From the wide load, create two values that equal the original two loads:
  // Loads[0] needs a trunc while Loads[1] needs a lshr and trunc.
  // TODO: Support big-endian as well.
  Value *Bottom = IRB.CreateTrunc(WideLoad, Base->getType());
  BaseSExt->setOperand(0, Bottom);

  IntegerType *OffsetTy = cast<IntegerType>(Offset->getType());
  Value *ShiftVal = ConstantInt::get(LoadTy, OffsetTy->getBitWidth());
  Value *Top = IRB.CreateLShr(WideLoad, ShiftVal);
  Value *Trunc = IRB.CreateTrunc(Top, OffsetTy);
  OffsetSExt->setOperand(0, Trunc);

  WideLoads.emplace(Base, make_unique<WidenedLoad>(Loads, WideLoad));
  return WideLoad;
}

Pass *llvm::createARMParallelDSPPass() {
  return new ARMParallelDSP();
}

char ARMParallelDSP::ID = 0;

INITIALIZE_PASS_BEGIN(ARMParallelDSP, "arm-parallel-dsp",
                      "Transform functions to use DSP intrinsics", false, false)
INITIALIZE_PASS_END(ARMParallelDSP, "arm-parallel-dsp",
                    "Transform functions to use DSP intrinsics", false, false)