1 //===- VPlan.h - Represent A Vectorizer Plan --------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file contains the declarations of the Vectorization Plan base classes:
11 /// 1. VPBasicBlock and VPRegionBlock that inherit from a common pure virtual
12 ///    VPBlockBase, together implementing a Hierarchical CFG;
13 /// 2. Specializations of GraphTraits that allow VPBlockBase graphs to be
14 ///    treated as proper graphs for generic algorithms;
15 /// 3. Pure virtual VPRecipeBase serving as the base class for recipes contained
16 ///    within VPBasicBlocks;
17 /// 4. VPInstruction, a concrete Recipe and VPUser modeling a single planned
18 ///    instruction;
19 /// 5. The VPlan class holding a candidate for vectorization;
20 /// 6. The VPlanPrinter class providing a way to print a plan in dot format;
21 /// These are documented in docs/VectorizationPlan.rst.
22 //
23 //===----------------------------------------------------------------------===//
24 
25 #ifndef LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
26 #define LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
27 
28 #include "VPlanLoopInfo.h"
29 #include "VPlanValue.h"
30 #include "llvm/ADT/DenseMap.h"
31 #include "llvm/ADT/DepthFirstIterator.h"
32 #include "llvm/ADT/GraphTraits.h"
33 #include "llvm/ADT/Optional.h"
34 #include "llvm/ADT/SmallBitVector.h"
35 #include "llvm/ADT/SmallPtrSet.h"
36 #include "llvm/ADT/SmallSet.h"
37 #include "llvm/ADT/SmallVector.h"
38 #include "llvm/ADT/Twine.h"
39 #include "llvm/ADT/ilist.h"
40 #include "llvm/ADT/ilist_node.h"
41 #include "llvm/Analysis/VectorUtils.h"
42 #include "llvm/IR/DebugLoc.h"
43 #include "llvm/IR/FMF.h"
44 #include "llvm/Support/InstructionCost.h"
45 #include <algorithm>
46 #include <cassert>
47 #include <cstddef>
48 #include <map>
49 #include <string>
50 
51 namespace llvm {
52 
53 class BasicBlock;
54 class DominatorTree;
55 class InductionDescriptor;
56 class InnerLoopVectorizer;
57 class IRBuilderBase;
58 class LoopInfo;
59 class raw_ostream;
60 class RecurrenceDescriptor;
61 class Value;
62 class VPBasicBlock;
63 class VPRegionBlock;
64 class VPlan;
65 class VPReplicateRecipe;
66 class VPlanSlp;
67 
/// Returns a calculation for the total number of elements for a given \p VF.
/// For fixed width vectors this value is a constant, whereas for scalable
/// vectors it is an expression determined at runtime.
Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF);

/// Return a value for \p Step multiplied by \p VF, of integer type \p Ty.
/// For scalable \p VF the result is a runtime expression involving vscale.
Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
                       int64_t Step);
76 
77 /// A range of powers-of-2 vectorization factors with fixed start and
78 /// adjustable end. The range includes start and excludes end, e.g.,:
79 /// [1, 9) = {1, 2, 4, 8}
80 struct VFRange {
81   // A power of 2.
82   const ElementCount Start;
83 
84   // Need not be a power of 2. If End <= Start range is empty.
85   ElementCount End;
86 
87   bool isEmpty() const {
88     return End.getKnownMinValue() <= Start.getKnownMinValue();
89   }
90 
91   VFRange(const ElementCount &Start, const ElementCount &End)
92       : Start(Start), End(End) {
93     assert(Start.isScalable() == End.isScalable() &&
94            "Both Start and End should have the same scalable flag");
95     assert(isPowerOf2_32(Start.getKnownMinValue()) &&
96            "Expected Start to be a power of 2");
97   }
98 };
99 
100 using VPlanPtr = std::unique_ptr<VPlan>;
101 
102 /// In what follows, the term "input IR" refers to code that is fed into the
103 /// vectorizer whereas the term "output IR" refers to code that is generated by
104 /// the vectorizer.
105 
106 /// VPLane provides a way to access lanes in both fixed width and scalable
107 /// vectors, where for the latter the lane index sometimes needs calculating
108 /// as a runtime expression.
109 class VPLane {
110 public:
111   /// Kind describes how to interpret Lane.
112   enum class Kind : uint8_t {
113     /// For First, Lane is the index into the first N elements of a
114     /// fixed-vector <N x <ElTy>> or a scalable vector <vscale x N x <ElTy>>.
115     First,
116     /// For ScalableLast, Lane is the offset from the start of the last
117     /// N-element subvector in a scalable vector <vscale x N x <ElTy>>. For
118     /// example, a Lane of 0 corresponds to lane `(vscale - 1) * N`, a Lane of
119     /// 1 corresponds to `((vscale - 1) * N) + 1`, etc.
120     ScalableLast
121   };
122 
123 private:
124   /// in [0..VF)
125   unsigned Lane;
126 
127   /// Indicates how the Lane should be interpreted, as described above.
128   Kind LaneKind;
129 
130 public:
131   VPLane(unsigned Lane, Kind LaneKind) : Lane(Lane), LaneKind(LaneKind) {}
132 
133   static VPLane getFirstLane() { return VPLane(0, VPLane::Kind::First); }
134 
135   static VPLane getLastLaneForVF(const ElementCount &VF) {
136     unsigned LaneOffset = VF.getKnownMinValue() - 1;
137     Kind LaneKind;
138     if (VF.isScalable())
139       // In this case 'LaneOffset' refers to the offset from the start of the
140       // last subvector with VF.getKnownMinValue() elements.
141       LaneKind = VPLane::Kind::ScalableLast;
142     else
143       LaneKind = VPLane::Kind::First;
144     return VPLane(LaneOffset, LaneKind);
145   }
146 
147   /// Returns a compile-time known value for the lane index and asserts if the
148   /// lane can only be calculated at runtime.
149   unsigned getKnownLane() const {
150     assert(LaneKind == Kind::First);
151     return Lane;
152   }
153 
154   /// Returns an expression describing the lane index that can be used at
155   /// runtime.
156   Value *getAsRuntimeExpr(IRBuilderBase &Builder, const ElementCount &VF) const;
157 
158   /// Returns the Kind of lane offset.
159   Kind getKind() const { return LaneKind; }
160 
161   /// Returns true if this is the first lane of the whole vector.
162   bool isFirstLane() const { return Lane == 0 && LaneKind == Kind::First; }
163 
164   /// Maps the lane to a cache index based on \p VF.
165   unsigned mapToCacheIndex(const ElementCount &VF) const {
166     switch (LaneKind) {
167     case VPLane::Kind::ScalableLast:
168       assert(VF.isScalable() && Lane < VF.getKnownMinValue());
169       return VF.getKnownMinValue() + Lane;
170     default:
171       assert(Lane < VF.getKnownMinValue());
172       return Lane;
173     }
174   }
175 
176   /// Returns the maxmimum number of lanes that we are able to consider
177   /// caching for \p VF.
178   static unsigned getNumCachedLanes(const ElementCount &VF) {
179     return VF.getKnownMinValue() * (VF.isScalable() ? 2 : 1);
180   }
181 };
182 
183 /// VPIteration represents a single point in the iteration space of the output
184 /// (vectorized and/or unrolled) IR loop.
185 struct VPIteration {
186   /// in [0..UF)
187   unsigned Part;
188 
189   VPLane Lane;
190 
191   VPIteration(unsigned Part, unsigned Lane,
192               VPLane::Kind Kind = VPLane::Kind::First)
193       : Part(Part), Lane(Lane, Kind) {}
194 
195   VPIteration(unsigned Part, const VPLane &Lane) : Part(Part), Lane(Lane) {}
196 
197   bool isFirstIteration() const { return Part == 0 && Lane.isFirstLane(); }
198 };
199 
200 /// VPTransformState holds information passed down when "executing" a VPlan,
201 /// needed for generating the output IR.
202 struct VPTransformState {
203   VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI,
204                    DominatorTree *DT, IRBuilderBase &Builder,
205                    InnerLoopVectorizer *ILV, VPlan *Plan)
206       : VF(VF), UF(UF), LI(LI), DT(DT), Builder(Builder), ILV(ILV), Plan(Plan) {
207   }
208 
209   /// The chosen Vectorization and Unroll Factors of the loop being vectorized.
210   ElementCount VF;
211   unsigned UF;
212 
213   /// Hold the indices to generate specific scalar instructions. Null indicates
214   /// that all instances are to be generated, using either scalar or vector
215   /// instructions.
216   Optional<VPIteration> Instance;
217 
218   struct DataState {
219     /// A type for vectorized values in the new loop. Each value from the
220     /// original loop, when vectorized, is represented by UF vector values in
221     /// the new unrolled loop, where UF is the unroll factor.
222     typedef SmallVector<Value *, 2> PerPartValuesTy;
223 
224     DenseMap<VPValue *, PerPartValuesTy> PerPartOutput;
225 
226     using ScalarsPerPartValuesTy = SmallVector<SmallVector<Value *, 4>, 2>;
227     DenseMap<VPValue *, ScalarsPerPartValuesTy> PerPartScalars;
228   } Data;
229 
230   /// Get the generated Value for a given VPValue and a given Part. Note that
231   /// as some Defs are still created by ILV and managed in its ValueMap, this
232   /// method will delegate the call to ILV in such cases in order to provide
233   /// callers a consistent API.
234   /// \see set.
235   Value *get(VPValue *Def, unsigned Part);
236 
237   /// Get the generated Value for a given VPValue and given Part and Lane.
238   Value *get(VPValue *Def, const VPIteration &Instance);
239 
240   bool hasVectorValue(VPValue *Def, unsigned Part) {
241     auto I = Data.PerPartOutput.find(Def);
242     return I != Data.PerPartOutput.end() && Part < I->second.size() &&
243            I->second[Part];
244   }
245 
246   bool hasAnyVectorValue(VPValue *Def) const {
247     return Data.PerPartOutput.find(Def) != Data.PerPartOutput.end();
248   }
249 
250   bool hasScalarValue(VPValue *Def, VPIteration Instance) {
251     auto I = Data.PerPartScalars.find(Def);
252     if (I == Data.PerPartScalars.end())
253       return false;
254     unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
255     return Instance.Part < I->second.size() &&
256            CacheIdx < I->second[Instance.Part].size() &&
257            I->second[Instance.Part][CacheIdx];
258   }
259 
260   /// Set the generated Value for a given VPValue and a given Part.
261   void set(VPValue *Def, Value *V, unsigned Part) {
262     if (!Data.PerPartOutput.count(Def)) {
263       DataState::PerPartValuesTy Entry(UF);
264       Data.PerPartOutput[Def] = Entry;
265     }
266     Data.PerPartOutput[Def][Part] = V;
267   }
268   /// Reset an existing vector value for \p Def and a given \p Part.
269   void reset(VPValue *Def, Value *V, unsigned Part) {
270     auto Iter = Data.PerPartOutput.find(Def);
271     assert(Iter != Data.PerPartOutput.end() &&
272            "need to overwrite existing value");
273     Iter->second[Part] = V;
274   }
275 
276   /// Set the generated scalar \p V for \p Def and the given \p Instance.
277   void set(VPValue *Def, Value *V, const VPIteration &Instance) {
278     auto Iter = Data.PerPartScalars.insert({Def, {}});
279     auto &PerPartVec = Iter.first->second;
280     while (PerPartVec.size() <= Instance.Part)
281       PerPartVec.emplace_back();
282     auto &Scalars = PerPartVec[Instance.Part];
283     unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
284     while (Scalars.size() <= CacheIdx)
285       Scalars.push_back(nullptr);
286     assert(!Scalars[CacheIdx] && "should overwrite existing value");
287     Scalars[CacheIdx] = V;
288   }
289 
290   /// Reset an existing scalar value for \p Def and a given \p Instance.
291   void reset(VPValue *Def, Value *V, const VPIteration &Instance) {
292     auto Iter = Data.PerPartScalars.find(Def);
293     assert(Iter != Data.PerPartScalars.end() &&
294            "need to overwrite existing value");
295     assert(Instance.Part < Iter->second.size() &&
296            "need to overwrite existing value");
297     unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
298     assert(CacheIdx < Iter->second[Instance.Part].size() &&
299            "need to overwrite existing value");
300     Iter->second[Instance.Part][CacheIdx] = V;
301   }
302 
303   /// Hold state information used when constructing the CFG of the output IR,
304   /// traversing the VPBasicBlocks and generating corresponding IR BasicBlocks.
305   struct CFGState {
306     /// The previous VPBasicBlock visited. Initially set to null.
307     VPBasicBlock *PrevVPBB = nullptr;
308 
309     /// The previous IR BasicBlock created or used. Initially set to the new
310     /// header BasicBlock.
311     BasicBlock *PrevBB = nullptr;
312 
313     /// The last IR BasicBlock in the output IR. Set to the new latch
314     /// BasicBlock, used for placing the newly created BasicBlocks.
315     BasicBlock *LastBB = nullptr;
316 
317     /// The IR BasicBlock that is the preheader of the vector loop in the output
318     /// IR.
319     /// FIXME: The vector preheader should also be modeled in VPlan, so any code
320     /// that needs to be added to the preheader gets directly generated by
321     /// VPlan. There should be no need to manage a pointer to the IR BasicBlock.
322     BasicBlock *VectorPreHeader = nullptr;
323 
324     /// A mapping of each VPBasicBlock to the corresponding BasicBlock. In case
325     /// of replication, maps the BasicBlock of the last replica created.
326     SmallDenseMap<VPBasicBlock *, BasicBlock *> VPBB2IRBB;
327 
328     /// Vector of VPBasicBlocks whose terminator instruction needs to be fixed
329     /// up at the end of vector code generation.
330     SmallVector<VPBasicBlock *, 8> VPBBsToFix;
331 
332     CFGState() = default;
333   } CFG;
334 
335   /// Hold a pointer to LoopInfo to register new basic blocks in the loop.
336   LoopInfo *LI;
337 
338   /// Hold a pointer to Dominator Tree to register new basic blocks in the loop.
339   DominatorTree *DT;
340 
341   /// Hold a reference to the IRBuilder used to generate output IR code.
342   IRBuilderBase &Builder;
343 
344   VPValue2ValueTy VPValue2Value;
345 
346   /// Hold the canonical scalar IV of the vector loop (start=0, step=VF*UF).
347   Value *CanonicalIV = nullptr;
348 
349   /// Hold a pointer to InnerLoopVectorizer to reuse its IR generation methods.
350   InnerLoopVectorizer *ILV;
351 
352   /// Pointer to the VPlan code is generated for.
353   VPlan *Plan;
354 
355   /// Holds recipes that may generate a poison value that is used after
356   /// vectorization, even when their operands are not poison.
357   SmallPtrSet<VPRecipeBase *, 16> MayGeneratePoisonRecipes;
358 };
359 
/// VPUser instance used by VPBlockBase to manage CondBit and the block
/// predicate. Currently VPBlockUsers are only used in VPBlockBase for
/// historical reasons, but in the future the only VPUsers should be either
/// recipes or live-outs.
364 struct VPBlockUser : public VPUser {
365   VPBlockUser() : VPUser({}, VPUserID::Block) {}
366 
367   VPValue *getSingleOperandOrNull() {
368     if (getNumOperands() == 1)
369       return getOperand(0);
370 
371     return nullptr;
372   }
373   const VPValue *getSingleOperandOrNull() const {
374     if (getNumOperands() == 1)
375       return getOperand(0);
376 
377     return nullptr;
378   }
379 
380   void resetSingleOpUser(VPValue *NewVal) {
381     assert(getNumOperands() <= 1 && "Didn't expect more than one operand!");
382     if (!NewVal) {
383       if (getNumOperands() == 1)
384         removeLastOperand();
385       return;
386     }
387 
388     if (getNumOperands() == 1)
389       setOperand(0, NewVal);
390     else
391       addOperand(NewVal);
392   }
393 };
394 
395 /// VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
396 /// A VPBlockBase can be either a VPBasicBlock or a VPRegionBlock.
397 class VPBlockBase {
398   friend class VPBlockUtils;
399 
400   const unsigned char SubclassID; ///< Subclass identifier (for isa/dyn_cast).
401 
402   /// An optional name for the block.
403   std::string Name;
404 
405   /// The immediate VPRegionBlock which this VPBlockBase belongs to, or null if
406   /// it is a topmost VPBlockBase.
407   VPRegionBlock *Parent = nullptr;
408 
409   /// List of predecessor blocks.
410   SmallVector<VPBlockBase *, 1> Predecessors;
411 
412   /// List of successor blocks.
413   SmallVector<VPBlockBase *, 1> Successors;
414 
415   /// Successor selector managed by a VPUser. For blocks with zero or one
416   /// successors, there is no operand. Otherwise there is exactly one operand
417   /// which is the branch condition.
418   VPBlockUser CondBitUser;
419 
420   /// If the block is predicated, its predicate is stored as an operand of this
421   /// VPUser to maintain the def-use relations. Otherwise there is no operand
422   /// here.
423   VPBlockUser PredicateUser;
424 
425   /// VPlan containing the block. Can only be set on the entry block of the
426   /// plan.
427   VPlan *Plan = nullptr;
428 
429   /// Add \p Successor as the last successor to this block.
430   void appendSuccessor(VPBlockBase *Successor) {
431     assert(Successor && "Cannot add nullptr successor!");
432     Successors.push_back(Successor);
433   }
434 
435   /// Add \p Predecessor as the last predecessor to this block.
436   void appendPredecessor(VPBlockBase *Predecessor) {
437     assert(Predecessor && "Cannot add nullptr predecessor!");
438     Predecessors.push_back(Predecessor);
439   }
440 
441   /// Remove \p Predecessor from the predecessors of this block.
442   void removePredecessor(VPBlockBase *Predecessor) {
443     auto Pos = find(Predecessors, Predecessor);
444     assert(Pos && "Predecessor does not exist");
445     Predecessors.erase(Pos);
446   }
447 
448   /// Remove \p Successor from the successors of this block.
449   void removeSuccessor(VPBlockBase *Successor) {
450     auto Pos = find(Successors, Successor);
451     assert(Pos && "Successor does not exist");
452     Successors.erase(Pos);
453   }
454 
455 protected:
456   VPBlockBase(const unsigned char SC, const std::string &N)
457       : SubclassID(SC), Name(N) {}
458 
459 public:
460   /// An enumeration for keeping track of the concrete subclass of VPBlockBase
461   /// that are actually instantiated. Values of this enumeration are kept in the
462   /// SubclassID field of the VPBlockBase objects. They are used for concrete
463   /// type identification.
464   using VPBlockTy = enum { VPBasicBlockSC, VPRegionBlockSC };
465 
466   using VPBlocksTy = SmallVectorImpl<VPBlockBase *>;
467 
468   virtual ~VPBlockBase() = default;
469 
470   const std::string &getName() const { return Name; }
471 
472   void setName(const Twine &newName) { Name = newName.str(); }
473 
474   /// \return an ID for the concrete type of this object.
475   /// This is used to implement the classof checks. This should not be used
476   /// for any other purpose, as the values may change as LLVM evolves.
477   unsigned getVPBlockID() const { return SubclassID; }
478 
479   VPRegionBlock *getParent() { return Parent; }
480   const VPRegionBlock *getParent() const { return Parent; }
481 
482   /// \return A pointer to the plan containing the current block.
483   VPlan *getPlan();
484   const VPlan *getPlan() const;
485 
486   /// Sets the pointer of the plan containing the block. The block must be the
487   /// entry block into the VPlan.
488   void setPlan(VPlan *ParentPlan);
489 
490   void setParent(VPRegionBlock *P) { Parent = P; }
491 
492   /// \return the VPBasicBlock that is the entry of this VPBlockBase,
493   /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
494   /// VPBlockBase is a VPBasicBlock, it is returned.
495   const VPBasicBlock *getEntryBasicBlock() const;
496   VPBasicBlock *getEntryBasicBlock();
497 
498   /// \return the VPBasicBlock that is the exit of this VPBlockBase,
499   /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
500   /// VPBlockBase is a VPBasicBlock, it is returned.
501   const VPBasicBlock *getExitBasicBlock() const;
502   VPBasicBlock *getExitBasicBlock();
503 
504   const VPBlocksTy &getSuccessors() const { return Successors; }
505   VPBlocksTy &getSuccessors() { return Successors; }
506 
507   iterator_range<VPBlockBase **> successors() { return Successors; }
508 
509   const VPBlocksTy &getPredecessors() const { return Predecessors; }
510   VPBlocksTy &getPredecessors() { return Predecessors; }
511 
512   /// \return the successor of this VPBlockBase if it has a single successor.
513   /// Otherwise return a null pointer.
514   VPBlockBase *getSingleSuccessor() const {
515     return (Successors.size() == 1 ? *Successors.begin() : nullptr);
516   }
517 
518   /// \return the predecessor of this VPBlockBase if it has a single
519   /// predecessor. Otherwise return a null pointer.
520   VPBlockBase *getSinglePredecessor() const {
521     return (Predecessors.size() == 1 ? *Predecessors.begin() : nullptr);
522   }
523 
524   size_t getNumSuccessors() const { return Successors.size(); }
525   size_t getNumPredecessors() const { return Predecessors.size(); }
526 
527   /// An Enclosing Block of a block B is any block containing B, including B
528   /// itself. \return the closest enclosing block starting from "this", which
529   /// has successors. \return the root enclosing block if all enclosing blocks
530   /// have no successors.
531   VPBlockBase *getEnclosingBlockWithSuccessors();
532 
533   /// \return the closest enclosing block starting from "this", which has
534   /// predecessors. \return the root enclosing block if all enclosing blocks
535   /// have no predecessors.
536   VPBlockBase *getEnclosingBlockWithPredecessors();
537 
538   /// \return the successors either attached directly to this VPBlockBase or, if
539   /// this VPBlockBase is the exit block of a VPRegionBlock and has no
540   /// successors of its own, search recursively for the first enclosing
541   /// VPRegionBlock that has successors and return them. If no such
542   /// VPRegionBlock exists, return the (empty) successors of the topmost
543   /// VPBlockBase reached.
544   const VPBlocksTy &getHierarchicalSuccessors() {
545     return getEnclosingBlockWithSuccessors()->getSuccessors();
546   }
547 
548   /// \return the hierarchical successor of this VPBlockBase if it has a single
549   /// hierarchical successor. Otherwise return a null pointer.
550   VPBlockBase *getSingleHierarchicalSuccessor() {
551     return getEnclosingBlockWithSuccessors()->getSingleSuccessor();
552   }
553 
554   /// \return the predecessors either attached directly to this VPBlockBase or,
555   /// if this VPBlockBase is the entry block of a VPRegionBlock and has no
556   /// predecessors of its own, search recursively for the first enclosing
557   /// VPRegionBlock that has predecessors and return them. If no such
558   /// VPRegionBlock exists, return the (empty) predecessors of the topmost
559   /// VPBlockBase reached.
560   const VPBlocksTy &getHierarchicalPredecessors() {
561     return getEnclosingBlockWithPredecessors()->getPredecessors();
562   }
563 
564   /// \return the hierarchical predecessor of this VPBlockBase if it has a
565   /// single hierarchical predecessor. Otherwise return a null pointer.
566   VPBlockBase *getSingleHierarchicalPredecessor() {
567     return getEnclosingBlockWithPredecessors()->getSinglePredecessor();
568   }
569 
570   /// \return the condition bit selecting the successor.
571   VPValue *getCondBit();
572   /// \return the condition bit selecting the successor.
573   const VPValue *getCondBit() const;
574   /// Set the condition bit selecting the successor.
575   void setCondBit(VPValue *CV);
576 
577   /// \return the block's predicate.
578   VPValue *getPredicate();
579   /// \return the block's predicate.
580   const VPValue *getPredicate() const;
581   /// Set the block's predicate.
582   void setPredicate(VPValue *Pred);
583 
584   /// Set a given VPBlockBase \p Successor as the single successor of this
585   /// VPBlockBase. This VPBlockBase is not added as predecessor of \p Successor.
586   /// This VPBlockBase must have no successors.
587   void setOneSuccessor(VPBlockBase *Successor) {
588     assert(Successors.empty() && "Setting one successor when others exist.");
589     appendSuccessor(Successor);
590   }
591 
592   /// Set two given VPBlockBases \p IfTrue and \p IfFalse to be the two
593   /// successors of this VPBlockBase. \p Condition is set as the successor
594   /// selector. This VPBlockBase is not added as predecessor of \p IfTrue or \p
595   /// IfFalse. This VPBlockBase must have no successors.
596   void setTwoSuccessors(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
597                         VPValue *Condition) {
598     assert(Successors.empty() && "Setting two successors when others exist.");
599     assert(Condition && "Setting two successors without condition!");
600     setCondBit(Condition);
601     appendSuccessor(IfTrue);
602     appendSuccessor(IfFalse);
603   }
604 
605   /// Set each VPBasicBlock in \p NewPreds as predecessor of this VPBlockBase.
606   /// This VPBlockBase must have no predecessors. This VPBlockBase is not added
607   /// as successor of any VPBasicBlock in \p NewPreds.
608   void setPredecessors(ArrayRef<VPBlockBase *> NewPreds) {
609     assert(Predecessors.empty() && "Block predecessors already set.");
610     for (auto *Pred : NewPreds)
611       appendPredecessor(Pred);
612   }
613 
614   /// Remove all the predecessor of this block.
615   void clearPredecessors() { Predecessors.clear(); }
616 
617   /// Remove all the successors of this block and set to null its condition bit
618   void clearSuccessors() {
619     Successors.clear();
620     setCondBit(nullptr);
621   }
622 
623   /// The method which generates the output IR that correspond to this
624   /// VPBlockBase, thereby "executing" the VPlan.
625   virtual void execute(struct VPTransformState *State) = 0;
626 
627   /// Delete all blocks reachable from a given VPBlockBase, inclusive.
628   static void deleteCFG(VPBlockBase *Entry);
629 
630   /// Return true if it is legal to hoist instructions into this block.
631   bool isLegalToHoistInto() {
632     // There are currently no constraints that prevent an instruction to be
633     // hoisted into a VPBlockBase.
634     return true;
635   }
636 
637   /// Replace all operands of VPUsers in the block with \p NewValue and also
638   /// replaces all uses of VPValues defined in the block with NewValue.
639   virtual void dropAllReferences(VPValue *NewValue) = 0;
640 
641 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
642   void printAsOperand(raw_ostream &OS, bool PrintType) const {
643     OS << getName();
644   }
645 
646   /// Print plain-text dump of this VPBlockBase to \p O, prefixing all lines
647   /// with \p Indent. \p SlotTracker is used to print unnamed VPValue's using
648   /// consequtive numbers.
649   ///
650   /// Note that the numbering is applied to the whole VPlan, so printing
651   /// individual blocks is consistent with the whole VPlan printing.
652   virtual void print(raw_ostream &O, const Twine &Indent,
653                      VPSlotTracker &SlotTracker) const = 0;
654 
655   /// Print plain-text dump of this VPlan to \p O.
656   void print(raw_ostream &O) const {
657     VPSlotTracker SlotTracker(getPlan());
658     print(O, "", SlotTracker);
659   }
660 
661   /// Print the successors of this block to \p O, prefixing all lines with \p
662   /// Indent.
663   void printSuccessors(raw_ostream &O, const Twine &Indent) const;
664 
665   /// Dump this VPBlockBase to dbgs().
666   LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
667 #endif
668 };
669 
/// VPRecipeBase is a base class modeling a sequence of one or more output IR
/// instructions. VPRecipeBase owns the VPValues it defines through VPDef
/// and is responsible for deleting its defined values. Single-value
/// VPRecipeBases that also inherit from VPValue must make sure to inherit from
/// VPRecipeBase before VPValue.
class VPRecipeBase : public ilist_node_with_parent<VPRecipeBase, VPBasicBlock>,
                     public VPDef,
                     public VPUser {
  // VPBasicBlock and VPBlockUtils maintain the Parent link when moving or
  // removing recipes.
  friend VPBasicBlock;
  friend class VPBlockUtils;

  /// Each VPRecipe belongs to a single VPBasicBlock.
  VPBasicBlock *Parent = nullptr;

public:
  /// Construct a recipe with subclass id \p SC and operands \p Operands.
  VPRecipeBase(const unsigned char SC, ArrayRef<VPValue *> Operands)
      : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}

  /// Construct a recipe with subclass id \p SC from a range of operands.
  template <typename IterT>
  VPRecipeBase(const unsigned char SC, iterator_range<IterT> Operands)
      : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}
  virtual ~VPRecipeBase() = default;

  /// \return the VPBasicBlock which this VPRecipe belongs to.
  VPBasicBlock *getParent() { return Parent; }
  const VPBasicBlock *getParent() const { return Parent; }

  /// The method which generates the output IR instructions that correspond to
  /// this VPRecipe, thereby "executing" the VPlan.
  virtual void execute(struct VPTransformState &State) = 0;

  /// Insert an unlinked recipe into a basic block immediately before
  /// the specified recipe.
  void insertBefore(VPRecipeBase *InsertPos);
  /// Insert an unlinked recipe into \p BB immediately before the insertion
  /// point \p IP;
  void insertBefore(VPBasicBlock &BB, iplist<VPRecipeBase>::iterator IP);

  /// Insert an unlinked Recipe into a basic block immediately after
  /// the specified Recipe.
  void insertAfter(VPRecipeBase *InsertPos);

  /// Unlink this recipe from its current VPBasicBlock and insert it into
  /// the VPBasicBlock that MovePos lives in, right after MovePos.
  void moveAfter(VPRecipeBase *MovePos);

  /// Unlink this recipe and insert into BB before I.
  ///
  /// \pre I is a valid iterator into BB.
  void moveBefore(VPBasicBlock &BB, iplist<VPRecipeBase>::iterator I);

  /// This method unlinks 'this' from the containing basic block, but does not
  /// delete it.
  void removeFromParent();

  /// This method unlinks 'this' from the containing basic block and deletes it.
  ///
  /// \returns an iterator pointing to the element after the erased one
  iplist<VPRecipeBase>::iterator eraseFromParent();

  /// Returns the underlying instruction, if the recipe is a VPValue or nullptr
  /// otherwise.
  Instruction *getUnderlyingInstr() {
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
  }
  const Instruction *getUnderlyingInstr() const {
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    // All VPDefs are also VPRecipeBases.
    return true;
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPUser *U) {
    return U->getVPUserID() == VPUser::VPUserID::Recipe;
  }

  /// Returns true if the recipe may have side-effects.
  bool mayHaveSideEffects() const;

  /// Returns true for PHI-like recipes.
  bool isPhi() const {
    return getVPDefID() >= VPFirstPHISC && getVPDefID() <= VPLastPHISC;
  }

  /// Returns true if the recipe may read from memory.
  bool mayReadFromMemory() const;

  /// Returns true if the recipe may write to memory.
  bool mayWriteToMemory() const;

  /// Returns true if the recipe may read from or write to memory.
  bool mayReadOrWriteMemory() const {
    return mayReadFromMemory() || mayWriteToMemory();
  }

  /// Returns true if the recipe only uses the first lane of operand \p Op.
  /// Conservatively returns false.
  virtual bool onlyFirstLaneUsed(const VPValue *Op) const {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    return false;
  }
};
776 
777 inline bool VPUser::classof(const VPDef *Def) {
778   return Def->getVPDefID() == VPRecipeBase::VPInstructionSC ||
779          Def->getVPDefID() == VPRecipeBase::VPWidenSC ||
780          Def->getVPDefID() == VPRecipeBase::VPWidenCallSC ||
781          Def->getVPDefID() == VPRecipeBase::VPWidenSelectSC ||
782          Def->getVPDefID() == VPRecipeBase::VPWidenGEPSC ||
783          Def->getVPDefID() == VPRecipeBase::VPBlendSC ||
784          Def->getVPDefID() == VPRecipeBase::VPInterleaveSC ||
785          Def->getVPDefID() == VPRecipeBase::VPReplicateSC ||
786          Def->getVPDefID() == VPRecipeBase::VPReductionSC ||
787          Def->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC ||
788          Def->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
789 }
790 
/// This is a concrete Recipe that models a single VPlan-level instruction.
/// While as any Recipe it may generate a sequence of IR instructions when
/// executed, these instructions would always form a single-def expression as
/// the VPInstruction is also a single def-use vertex.
class VPInstruction : public VPRecipeBase, public VPValue {
  friend class VPlanSlp;

public:
  /// VPlan opcodes, extending LLVM IR with idiomatic instructions. Values
  /// start past Instruction::OtherOpsEnd so they never collide with LLVM IR
  /// opcodes.
  enum {
    FirstOrderRecurrenceSplice =
        Instruction::OtherOpsEnd + 1, // Combines the incoming and previous
                                      // values of a first-order recurrence.
    Not,
    ICmpULE,
    SLPLoad,
    SLPStore,
    ActiveLaneMask,
    CanonicalIVIncrement,
    CanonicalIVIncrementNUW,
    BranchOnCount,
  };

private:
  typedef unsigned char OpcodeTy;
  // Either an LLVM IR opcode or one of the VPlan-specific opcodes above.
  OpcodeTy Opcode;
  // Fast-math flags applied when generating the instruction(s).
  FastMathFlags FMF;
  // Debug location attached to generated instructions.
  DebugLoc DL;

  /// Utility method serving execute(): generates a single instance of the
  /// modeled instruction.
  void generateInstruction(VPTransformState &State, unsigned Part);

protected:
  /// Record the IR instruction this VPInstruction models as its underlying
  /// value.
  void setUnderlyingInstr(Instruction *I) { setUnderlyingValue(I); }

public:
  VPInstruction(unsigned Opcode, ArrayRef<VPValue *> Operands, DebugLoc DL)
      : VPRecipeBase(VPRecipeBase::VPInstructionSC, Operands),
        VPValue(VPValue::VPVInstructionSC, nullptr, this), Opcode(Opcode),
        DL(DL) {}

  VPInstruction(unsigned Opcode, std::initializer_list<VPValue *> Operands,
                DebugLoc DL = {})
      : VPInstruction(Opcode, ArrayRef<VPValue *>(Operands), DL) {}

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVInstructionSC;
  }

  /// Create a copy of this VPInstruction with the same opcode, operands and
  /// debug location. Note that the fast-math flags and the underlying value
  /// are not copied.
  VPInstruction *clone() const {
    SmallVector<VPValue *, 2> Operands(operands());
    return new VPInstruction(Opcode, Operands, DL);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *R) {
    return R->getVPDefID() == VPRecipeBase::VPInstructionSC;
  }

  /// Extra classof implementations to allow directly casting from VPUser ->
  /// VPInstruction.
  static inline bool classof(const VPUser *U) {
    auto *R = dyn_cast<VPRecipeBase>(U);
    return R && R->getVPDefID() == VPRecipeBase::VPInstructionSC;
  }
  static inline bool classof(const VPRecipeBase *R) {
    return R->getVPDefID() == VPRecipeBase::VPInstructionSC;
  }

  unsigned getOpcode() const { return Opcode; }

  /// Generate the instruction.
  /// TODO: We currently execute only per-part unless a specific instance is
  /// provided.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the VPInstruction to \p O.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;

  /// Print the VPInstruction to dbgs() (for debugging).
  LLVM_DUMP_METHOD void dump() const;
#endif

  /// Return true if this instruction may modify memory.
  bool mayWriteToMemory() const {
    // TODO: we can use attributes of the called function to rule out memory
    //       modifications.
    return Opcode == Instruction::Store || Opcode == Instruction::Call ||
           Opcode == Instruction::Invoke || Opcode == SLPStore;
  }

  /// Returns true if this VPInstruction produces a value; false for opcodes
  /// that only have effects (branches, stores, etc.).
  bool hasResult() const {
    // CallInst may or may not have a result, depending on the called function.
    // Conservatively assume calls have results for now.
    switch (getOpcode()) {
    case Instruction::Ret:
    case Instruction::Br:
    case Instruction::Store:
    case Instruction::Switch:
    case Instruction::IndirectBr:
    case Instruction::Resume:
    case Instruction::CatchRet:
    case Instruction::Unreachable:
    case Instruction::Fence:
    case Instruction::AtomicRMW:
    case VPInstruction::BranchOnCount:
      return false;
    default:
      return true;
    }
  }

  /// Set the fast-math flags.
  void setFastMathFlags(FastMathFlags FMFNew);

  /// Returns true if the recipe only uses the first lane of operand \p Op.
  bool onlyFirstLaneUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    // For the opcodes below, only the first lane of the first operand is
    // known to be used; any other operand is conservatively reported as
    // fully used.
    if (getOperand(0) != Op)
      return false;
    switch (getOpcode()) {
    default:
      return false;
    case VPInstruction::ActiveLaneMask:
    case VPInstruction::CanonicalIVIncrement:
    case VPInstruction::CanonicalIVIncrementNUW:
    case VPInstruction::BranchOnCount:
      return true;
    };
    llvm_unreachable("switch should return");
  }
};
928 
929 /// VPWidenRecipe is a recipe for producing a copy of vector type its
930 /// ingredient. This recipe covers most of the traditional vectorization cases
931 /// where each ingredient transforms into a vectorized version of itself.
932 class VPWidenRecipe : public VPRecipeBase, public VPValue {
933 public:
934   template <typename IterT>
935   VPWidenRecipe(Instruction &I, iterator_range<IterT> Operands)
936       : VPRecipeBase(VPRecipeBase::VPWidenSC, Operands),
937         VPValue(VPValue::VPVWidenSC, &I, this) {}
938 
939   ~VPWidenRecipe() override = default;
940 
941   /// Method to support type inquiry through isa, cast, and dyn_cast.
942   static inline bool classof(const VPDef *D) {
943     return D->getVPDefID() == VPRecipeBase::VPWidenSC;
944   }
945   static inline bool classof(const VPValue *V) {
946     return V->getVPValueID() == VPValue::VPVWidenSC;
947   }
948 
949   /// Produce widened copies of all Ingredients.
950   void execute(VPTransformState &State) override;
951 
952 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
953   /// Print the recipe.
954   void print(raw_ostream &O, const Twine &Indent,
955              VPSlotTracker &SlotTracker) const override;
956 #endif
957 };
958 
959 /// A recipe for widening Call instructions.
960 class VPWidenCallRecipe : public VPRecipeBase, public VPValue {
961 
962 public:
963   template <typename IterT>
964   VPWidenCallRecipe(CallInst &I, iterator_range<IterT> CallArguments)
965       : VPRecipeBase(VPRecipeBase::VPWidenCallSC, CallArguments),
966         VPValue(VPValue::VPVWidenCallSC, &I, this) {}
967 
968   ~VPWidenCallRecipe() override = default;
969 
970   /// Method to support type inquiry through isa, cast, and dyn_cast.
971   static inline bool classof(const VPDef *D) {
972     return D->getVPDefID() == VPRecipeBase::VPWidenCallSC;
973   }
974 
975   /// Produce a widened version of the call instruction.
976   void execute(VPTransformState &State) override;
977 
978 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
979   /// Print the recipe.
980   void print(raw_ostream &O, const Twine &Indent,
981              VPSlotTracker &SlotTracker) const override;
982 #endif
983 };
984 
985 /// A recipe for widening select instructions.
986 class VPWidenSelectRecipe : public VPRecipeBase, public VPValue {
987 
988   /// Is the condition of the select loop invariant?
989   bool InvariantCond;
990 
991 public:
992   template <typename IterT>
993   VPWidenSelectRecipe(SelectInst &I, iterator_range<IterT> Operands,
994                       bool InvariantCond)
995       : VPRecipeBase(VPRecipeBase::VPWidenSelectSC, Operands),
996         VPValue(VPValue::VPVWidenSelectSC, &I, this),
997         InvariantCond(InvariantCond) {}
998 
999   ~VPWidenSelectRecipe() override = default;
1000 
1001   /// Method to support type inquiry through isa, cast, and dyn_cast.
1002   static inline bool classof(const VPDef *D) {
1003     return D->getVPDefID() == VPRecipeBase::VPWidenSelectSC;
1004   }
1005 
1006   /// Produce a widened version of the select instruction.
1007   void execute(VPTransformState &State) override;
1008 
1009 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1010   /// Print the recipe.
1011   void print(raw_ostream &O, const Twine &Indent,
1012              VPSlotTracker &SlotTracker) const override;
1013 #endif
1014 };
1015 
1016 /// A recipe for handling GEP instructions.
1017 class VPWidenGEPRecipe : public VPRecipeBase, public VPValue {
1018   bool IsPtrLoopInvariant;
1019   SmallBitVector IsIndexLoopInvariant;
1020 
1021 public:
1022   template <typename IterT>
1023   VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands)
1024       : VPRecipeBase(VPRecipeBase::VPWidenGEPSC, Operands),
1025         VPValue(VPWidenGEPSC, GEP, this),
1026         IsIndexLoopInvariant(GEP->getNumIndices(), false) {}
1027 
1028   template <typename IterT>
1029   VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands,
1030                    Loop *OrigLoop)
1031       : VPRecipeBase(VPRecipeBase::VPWidenGEPSC, Operands),
1032         VPValue(VPValue::VPVWidenGEPSC, GEP, this),
1033         IsIndexLoopInvariant(GEP->getNumIndices(), false) {
1034     IsPtrLoopInvariant = OrigLoop->isLoopInvariant(GEP->getPointerOperand());
1035     for (auto Index : enumerate(GEP->indices()))
1036       IsIndexLoopInvariant[Index.index()] =
1037           OrigLoop->isLoopInvariant(Index.value().get());
1038   }
1039   ~VPWidenGEPRecipe() override = default;
1040 
1041   /// Method to support type inquiry through isa, cast, and dyn_cast.
1042   static inline bool classof(const VPDef *D) {
1043     return D->getVPDefID() == VPRecipeBase::VPWidenGEPSC;
1044   }
1045 
1046   /// Generate the gep nodes.
1047   void execute(VPTransformState &State) override;
1048 
1049 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1050   /// Print the recipe.
1051   void print(raw_ostream &O, const Twine &Indent,
1052              VPSlotTracker &SlotTracker) const override;
1053 #endif
1054 };
1055 
/// A recipe for handling phi nodes of integer and floating-point inductions,
/// producing their vector and scalar values.
class VPWidenIntOrFpInductionRecipe : public VPRecipeBase, public VPValue {
  // The original induction phi in the scalar loop.
  PHINode *IV;
  // Descriptor of the induction (kind, step, ...).
  const InductionDescriptor &IndDesc;
  // Whether a scalar phi must be generated for the induction.
  bool NeedsScalarIV;
  // Whether a vector phi must be generated for the induction.
  bool NeedsVectorIV;

public:
  /// Create a recipe for induction phi \p IV with start value \p Start. The
  /// recipe's single defined value has \p IV as underlying value.
  VPWidenIntOrFpInductionRecipe(PHINode *IV, VPValue *Start,
                                const InductionDescriptor &IndDesc,
                                bool NeedsScalarIV, bool NeedsVectorIV)
      : VPRecipeBase(VPWidenIntOrFpInductionSC, {Start}), VPValue(IV, this),
        IV(IV), IndDesc(IndDesc), NeedsScalarIV(NeedsScalarIV),
        NeedsVectorIV(NeedsVectorIV) {}

  /// As above, but for an induction whose result is truncated by \p Trunc.
  /// Here the defined value's underlying value is the truncate, while IV
  /// still refers to the original phi.
  VPWidenIntOrFpInductionRecipe(PHINode *IV, VPValue *Start,
                                const InductionDescriptor &IndDesc,
                                TruncInst *Trunc, bool NeedsScalarIV,
                                bool NeedsVectorIV)
      : VPRecipeBase(VPWidenIntOrFpInductionSC, {Start}), VPValue(Trunc, this),
        IV(IV), IndDesc(IndDesc), NeedsScalarIV(NeedsScalarIV),
        NeedsVectorIV(NeedsVectorIV) {}

  ~VPWidenIntOrFpInductionRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenIntOrFpInductionSC;
  }

  /// Generate the vectorized and scalarized versions of the phi node as
  /// needed by their users.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the start value of the induction.
  VPValue *getStartValue() { return getOperand(0); }
  const VPValue *getStartValue() const { return getOperand(0); }

  /// Returns the first defined value as TruncInst, if it is one or nullptr
  /// otherwise.
  TruncInst *getTruncInst() {
    return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
  }
  const TruncInst *getTruncInst() const {
    return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
  }

  /// Returns the original induction phi in the scalar loop.
  PHINode *getPHINode() { return IV; }

  /// Returns the induction descriptor for the recipe.
  const InductionDescriptor &getInductionDescriptor() const { return IndDesc; }

  /// Returns true if the induction is canonical, i.e. starting at 0 and
  /// incremented by UF * VF (= the original IV is incremented by 1).
  bool isCanonical() const;

  /// Returns the scalar type of the induction: the truncate's type when one
  /// is present, the phi's type otherwise.
  const Type *getScalarType() const {
    const TruncInst *TruncI = getTruncInst();
    return TruncI ? TruncI->getType() : IV->getType();
  }

  /// Returns true if a scalar phi needs to be created for the induction.
  bool needsScalarIV() const { return NeedsScalarIV; }

  /// Returns true if a vector phi needs to be created for the induction.
  bool needsVectorIV() const { return NeedsVectorIV; }
};
1131 
/// A pure virtual base class for all recipes modeling header phis, including
/// phis for first order recurrences, pointer inductions and reductions. The
/// start value is the first operand of the recipe and the incoming value from
/// the backedge is the second operand.
class VPHeaderPHIRecipe : public VPRecipeBase, public VPValue {
protected:
  /// Create a header-phi recipe with VPValue ID \p VPVID and VPDef ID
  /// \p VPDefID for \p Phi. If \p Start is non-null, it is added as the
  /// first operand.
  VPHeaderPHIRecipe(unsigned char VPVID, unsigned char VPDefID, PHINode *Phi,
                    VPValue *Start = nullptr)
      : VPRecipeBase(VPDefID, {}), VPValue(VPVID, Phi, this) {
    if (Start)
      addOperand(Start);
  }

public:
  ~VPHeaderPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *B) {
    return B->getVPDefID() == VPRecipeBase::VPCanonicalIVPHISC ||
           B->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC ||
           B->getVPDefID() == VPRecipeBase::VPReductionPHISC ||
           B->getVPDefID() == VPRecipeBase::VPWidenIntOrFpInductionSC ||
           B->getVPDefID() == VPRecipeBase::VPWidenPHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVCanonicalIVPHISC ||
           V->getVPValueID() == VPValue::VPVFirstOrderRecurrencePHISC ||
           V->getVPValueID() == VPValue::VPVReductionPHISC ||
           V->getVPValueID() == VPValue::VPVWidenIntOrFpInductionSC ||
           V->getVPValueID() == VPValue::VPVWidenPHISC;
  }

  /// Generate the phi nodes.
  void execute(VPTransformState &State) override = 0;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override = 0;
#endif

  /// Returns the start value of the phi, if one is set.
  VPValue *getStartValue() {
    return getNumOperands() == 0 ? nullptr : getOperand(0);
  }

  /// Returns the incoming value from the loop backedge. Assumes both the
  /// start value (operand 0) and the backedge value (operand 1) have been
  /// added.
  VPValue *getBackedgeValue() {
    return getOperand(1);
  }

  /// Returns the backedge value as a recipe. The backedge value is guaranteed
  /// to be a recipe.
  VPRecipeBase *getBackedgeRecipe() {
    return cast<VPRecipeBase>(getBackedgeValue()->getDef());
  }
};
1189 
1190 /// A recipe for handling header phis that are widened in the vector loop.
1191 /// In the VPlan native path, all incoming VPValues & VPBasicBlock pairs are
1192 /// managed in the recipe directly.
1193 class VPWidenPHIRecipe : public VPHeaderPHIRecipe {
1194   /// List of incoming blocks. Only used in the VPlan native path.
1195   SmallVector<VPBasicBlock *, 2> IncomingBlocks;
1196 
1197 public:
1198   /// Create a new VPWidenPHIRecipe for \p Phi with start value \p Start.
1199   VPWidenPHIRecipe(PHINode *Phi, VPValue *Start = nullptr)
1200       : VPHeaderPHIRecipe(VPVWidenPHISC, VPWidenPHISC, Phi) {
1201     if (Start)
1202       addOperand(Start);
1203   }
1204 
1205   ~VPWidenPHIRecipe() override = default;
1206 
1207   /// Method to support type inquiry through isa, cast, and dyn_cast.
1208   static inline bool classof(const VPRecipeBase *B) {
1209     return B->getVPDefID() == VPRecipeBase::VPWidenPHISC;
1210   }
1211   static inline bool classof(const VPHeaderPHIRecipe *R) {
1212     return R->getVPDefID() == VPRecipeBase::VPWidenPHISC;
1213   }
1214   static inline bool classof(const VPValue *V) {
1215     return V->getVPValueID() == VPValue::VPVWidenPHISC;
1216   }
1217 
1218   /// Generate the phi/select nodes.
1219   void execute(VPTransformState &State) override;
1220 
1221 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1222   /// Print the recipe.
1223   void print(raw_ostream &O, const Twine &Indent,
1224              VPSlotTracker &SlotTracker) const override;
1225 #endif
1226 
1227   /// Adds a pair (\p IncomingV, \p IncomingBlock) to the phi.
1228   void addIncoming(VPValue *IncomingV, VPBasicBlock *IncomingBlock) {
1229     addOperand(IncomingV);
1230     IncomingBlocks.push_back(IncomingBlock);
1231   }
1232 
1233   /// Returns the \p I th incoming VPBasicBlock.
1234   VPBasicBlock *getIncomingBlock(unsigned I) { return IncomingBlocks[I]; }
1235 
1236   /// Returns the \p I th incoming VPValue.
1237   VPValue *getIncomingValue(unsigned I) { return getOperand(I); }
1238 };
1239 
1240 /// A recipe for handling first-order recurrence phis. The start value is the
1241 /// first operand of the recipe and the incoming value from the backedge is the
1242 /// second operand.
1243 struct VPFirstOrderRecurrencePHIRecipe : public VPHeaderPHIRecipe {
1244   VPFirstOrderRecurrencePHIRecipe(PHINode *Phi, VPValue &Start)
1245       : VPHeaderPHIRecipe(VPVFirstOrderRecurrencePHISC,
1246                           VPFirstOrderRecurrencePHISC, Phi, &Start) {}
1247 
1248   /// Method to support type inquiry through isa, cast, and dyn_cast.
1249   static inline bool classof(const VPRecipeBase *R) {
1250     return R->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC;
1251   }
1252   static inline bool classof(const VPHeaderPHIRecipe *R) {
1253     return R->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC;
1254   }
1255   static inline bool classof(const VPValue *V) {
1256     return V->getVPValueID() == VPValue::VPVFirstOrderRecurrencePHISC;
1257   }
1258 
1259   void execute(VPTransformState &State) override;
1260 
1261 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1262   /// Print the recipe.
1263   void print(raw_ostream &O, const Twine &Indent,
1264              VPSlotTracker &SlotTracker) const override;
1265 #endif
1266 };
1267 
/// A recipe for handling reduction phis. The start value is the first operand
/// of the recipe and the incoming value from the backedge is the second
/// operand.
class VPReductionPHIRecipe : public VPHeaderPHIRecipe {
  /// Descriptor for the reduction.
  const RecurrenceDescriptor &RdxDesc;

  /// The phi is part of an in-loop reduction.
  bool IsInLoop;

  /// The phi is part of an ordered reduction. Requires IsInLoop to be true.
  bool IsOrdered;

public:
  /// Create a new VPReductionPHIRecipe for the reduction \p Phi described by \p
  /// RdxDesc.
  VPReductionPHIRecipe(PHINode *Phi, const RecurrenceDescriptor &RdxDesc,
                       VPValue &Start, bool IsInLoop = false,
                       bool IsOrdered = false)
      : VPHeaderPHIRecipe(VPVReductionPHISC, VPReductionPHISC, Phi, &Start),
        RdxDesc(RdxDesc), IsInLoop(IsInLoop), IsOrdered(IsOrdered) {
    assert((!IsOrdered || IsInLoop) && "IsOrdered requires IsInLoop");
  }

  ~VPReductionPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *R) {
    return R->getVPDefID() == VPRecipeBase::VPReductionPHISC;
  }
  static inline bool classof(const VPHeaderPHIRecipe *R) {
    return R->getVPDefID() == VPRecipeBase::VPReductionPHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReductionPHISC;
  }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the descriptor of the reduction this phi belongs to.
  const RecurrenceDescriptor &getRecurrenceDescriptor() const {
    return RdxDesc;
  }

  /// Returns true, if the phi is part of an ordered reduction.
  bool isOrdered() const { return IsOrdered; }

  /// Returns true, if the phi is part of an in-loop reduction.
  bool isInLoop() const { return IsInLoop; }
};
1324 
/// A recipe for vectorizing a phi-node as a sequence of mask-based select
/// instructions.
class VPBlendRecipe : public VPRecipeBase, public VPValue {
  // The original phi being blended.
  PHINode *Phi;

public:
  /// The blend operation is a User of the incoming values and of their
  /// respective masks, ordered [I0, M0, I1, M1, ...]. Note that a single value
  /// might be incoming with a full mask for which there is no VPValue.
  VPBlendRecipe(PHINode *Phi, ArrayRef<VPValue *> Operands)
      : VPRecipeBase(VPBlendSC, Operands),
        VPValue(VPValue::VPVBlendSC, Phi, this), Phi(Phi) {
    assert(Operands.size() > 0 &&
           ((Operands.size() == 1) || (Operands.size() % 2 == 0)) &&
           "Expected either a single incoming value or a positive even number "
           "of operands");
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPBlendSC;
  }

  /// Return the number of incoming values, taking into account that a single
  /// incoming value has no mask.
  unsigned getNumIncomingValues() const { return (getNumOperands() + 1) / 2; }

  /// Return incoming value number \p Idx.
  VPValue *getIncomingValue(unsigned Idx) const { return getOperand(Idx * 2); }

  /// Return mask number \p Idx. Must only be called when masks are present,
  /// i.e. the recipe has more than one operand.
  VPValue *getMask(unsigned Idx) const { return getOperand(Idx * 2 + 1); }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns true if the recipe only uses the first lane of operand \p Op.
  /// True iff all users of this blend only use its first lane.
  bool onlyFirstLaneUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    // Recursing through Blend recipes only, must terminate at header phi's the
    // latest.
    return all_of(users(), [this](VPUser *U) {
      return cast<VPRecipeBase>(U)->onlyFirstLaneUsed(this);
    });
  }
};
1378 
/// VPInterleaveRecipe is a recipe for transforming an interleave group of load
/// or stores into one wide load/store and shuffles. The first operand of a
/// VPInterleave recipe is the address, followed by the stored values, followed
/// by an optional mask.
class VPInterleaveRecipe : public VPRecipeBase {
  // The interleave group being widened.
  const InterleaveGroup<Instruction> *IG;

  // Whether an (optional) mask was added as the last operand.
  bool HasMask = false;

public:
  VPInterleaveRecipe(const InterleaveGroup<Instruction> *IG, VPValue *Addr,
                     ArrayRef<VPValue *> StoredValues, VPValue *Mask)
      : VPRecipeBase(VPInterleaveSC, {Addr}), IG(IG) {
    // Define one VPValue per group member that produces a result; members
    // with void type (stores) define no value.
    for (unsigned i = 0; i < IG->getFactor(); ++i)
      if (Instruction *I = IG->getMember(i)) {
        if (I->getType()->isVoidTy())
          continue;
        new VPValue(I, this);
      }

    for (auto *SV : StoredValues)
      addOperand(SV);
    if (Mask) {
      HasMask = true;
      addOperand(Mask);
    }
  }
  ~VPInterleaveRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPInterleaveSC;
  }

  /// Return the address accessed by this recipe.
  VPValue *getAddr() const {
    return getOperand(0); // Address is the 1st, mandatory operand.
  }

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    // Mask is optional and therefore the last, currently 2nd operand.
    return HasMask ? getOperand(getNumOperands() - 1) : nullptr;
  }

  /// Return the VPValues stored by this interleave group. If it is a load
  /// interleave group, return an empty ArrayRef.
  ArrayRef<VPValue *> getStoredValues() const {
    // The first operand is the address, followed by the stored values, followed
    // by an optional mask.
    return ArrayRef<VPValue *>(op_begin(), getNumOperands())
        .slice(1, getNumStoreOperands());
  }

  /// Generate the wide load or store, and shuffles.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Return the interleave group being widened by this recipe.
  const InterleaveGroup<Instruction> *getInterleaveGroup() { return IG; }

  /// Returns the number of stored operands of this interleave group. Returns 0
  /// for load interleave groups.
  unsigned getNumStoreOperands() const {
    return getNumOperands() - (HasMask ? 2 : 1);
  }
};
1451 
/// A recipe to represent inloop reduction operations, performing a reduction on
/// a vector operand into a scalar value, and adding the result to a chain.
/// The Operands are {ChainOp, VecOp, [Condition]}.
class VPReductionRecipe : public VPRecipeBase, public VPValue {
  /// The recurrence descriptor for the reduction in question.
  const RecurrenceDescriptor *RdxDesc;
  /// Pointer to the TTI, needed to create the target reduction
  const TargetTransformInfo *TTI;

public:
  /// Create a reduction recipe for \p I described by \p R. \p CondOp may be
  /// null, in which case no condition operand is added.
  VPReductionRecipe(const RecurrenceDescriptor *R, Instruction *I,
                    VPValue *ChainOp, VPValue *VecOp, VPValue *CondOp,
                    const TargetTransformInfo *TTI)
      : VPRecipeBase(VPRecipeBase::VPReductionSC, {ChainOp, VecOp}),
        VPValue(VPValue::VPVReductionSC, I, this), RdxDesc(R), TTI(TTI) {
    if (CondOp)
      addOperand(CondOp);
  }

  ~VPReductionRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReductionSC;
  }

  /// Generate the reduction in the loop
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// The VPValue of the scalar Chain being accumulated.
  VPValue *getChainOp() const { return getOperand(0); }
  /// The VPValue of the vector value to be reduced.
  VPValue *getVecOp() const { return getOperand(1); }
  /// The VPValue of the condition for the block, or nullptr if there is none.
  VPValue *getCondOp() const {
    return getNumOperands() > 2 ? getOperand(2) : nullptr;
  }
};
1496 
/// VPReplicateRecipe replicates a given instruction producing multiple scalar
/// copies of the original scalar type, one per lane, instead of producing a
/// single copy of widened type for all lanes. If the instruction is known to be
/// uniform only one copy, per lane zero, will be generated.
class VPReplicateRecipe : public VPRecipeBase, public VPValue {
  /// Indicator if only a single replica per lane is needed.
  bool IsUniform;

  /// Indicator if the replicas are also predicated.
  bool IsPredicated;

  /// Indicator if the scalar values should also be packed into a vector.
  bool AlsoPack;

public:
  /// Create a replicating recipe for \p I with the given operands. AlsoPack
  /// is derived from \p IsPredicated and whether \p I has users (see comment
  /// in the body).
  template <typename IterT>
  VPReplicateRecipe(Instruction *I, iterator_range<IterT> Operands,
                    bool IsUniform, bool IsPredicated = false)
      : VPRecipeBase(VPReplicateSC, Operands), VPValue(VPVReplicateSC, I, this),
        IsUniform(IsUniform), IsPredicated(IsPredicated) {
    // Retain the previous behavior of predicateInstructions(), where an
    // insert-element of a predicated instruction got hoisted into the
    // predicated basic block iff it was its only user. This is achieved by
    // having predicated instructions also pack their values into a vector by
    // default unless they have a replicated user which uses their scalar value.
    AlsoPack = IsPredicated && !I->use_empty();
  }

  ~VPReplicateRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPReplicateSC;
  }

  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReplicateSC;
  }

  /// Generate replicas of the desired Ingredient. Replicas will be generated
  /// for all parts and lanes unless a specific part and lane are specified in
  /// the \p State.
  void execute(VPTransformState &State) override;

  /// Override whether the scalar values should also be packed into a vector.
  void setAlsoPack(bool Pack) { AlsoPack = Pack; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns true if only a single replica, for lane zero, is needed.
  bool isUniform() const { return IsUniform; }

  /// Returns true if the scalar values are also packed into a vector.
  bool isPacked() const { return AlsoPack; }

  /// Returns true if the replicas are predicated.
  bool isPredicated() const { return IsPredicated; }

  /// Returns true if the recipe only uses the first lane of operand \p Op.
  /// A uniform replicate recipe only ever reads lane zero of its operands.
  bool onlyFirstLaneUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    return isUniform();
  }
};
1562 
1563 /// A recipe for generating conditional branches on the bits of a mask.
class VPBranchOnMaskRecipe : public VPRecipeBase {
public:
  /// Create a recipe branching on \p BlockInMask. A nullptr mask represents an
  /// all-one mask, in which case the recipe has no operands.
  VPBranchOnMaskRecipe(VPValue *BlockInMask)
      : VPRecipeBase(VPBranchOnMaskSC, {}) {
    if (BlockInMask) // nullptr means all-one mask.
      addOperand(BlockInMask);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC;
  }

  /// Generate the extraction of the appropriate bit from the block mask and the
  /// conditional branch.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override {
    O << Indent << "BRANCH-ON-MASK ";
    if (VPValue *Mask = getMask())
      Mask->printAsOperand(O, SlotTracker);
    else
      O << " All-One";
  }
#endif

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    assert(getNumOperands() <= 1 && "should have either 0 or 1 operands");
    // Mask is optional.
    return getNumOperands() == 1 ? getOperand(0) : nullptr;
  }
};
1601 
1602 /// VPPredInstPHIRecipe is a recipe for generating the phi nodes needed when
1603 /// control converges back from a Branch-on-Mask. The phi nodes are needed in
1604 /// order to merge values that are set under such a branch and feed their uses.
1605 /// The phi nodes can be scalar or vector depending on the users of the value.
1606 /// This recipe works in concert with VPBranchOnMaskRecipe.
class VPPredInstPHIRecipe : public VPRecipeBase, public VPValue {
public:
  /// Construct a VPPredInstPHIRecipe given \p PredV, whose value needs phi
  /// nodes after merging back from a Branch-on-Mask.
  VPPredInstPHIRecipe(VPValue *PredV)
      : VPRecipeBase(VPPredInstPHISC, PredV),
        VPValue(VPValue::VPVPredInstPHI, nullptr, this) {}
  ~VPPredInstPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPPredInstPHISC;
  }

  /// Generates phi nodes for live-outs as needed to retain SSA form.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1630 
1631 /// A Recipe for widening load/store operations.
1632 /// The recipe uses the following VPValues:
1633 /// - For load: Address, optional mask
1634 /// - For store: Address, stored value, optional mask
1635 /// TODO: We currently execute only per-part unless a specific instance is
1636 /// provided.
class VPWidenMemoryInstructionRecipe : public VPRecipeBase, public VPValue {
  /// The underlying load or store instruction.
  Instruction &Ingredient;

  // Whether the loaded-from / stored-to addresses are consecutive.
  bool Consecutive;

  // Whether the consecutive loaded/stored addresses are in reverse order.
  bool Reverse;

  /// Add \p Mask as the last operand, if it is not null. A null mask stands
  /// for an all-one mask, which is not stored as an operand.
  void setMask(VPValue *Mask) {
    if (!Mask)
      return;
    addOperand(Mask);
  }

  /// Returns true if a mask operand is present: a masked store has 3 operands
  /// (address, stored value, mask), a masked load has 2 (address, mask).
  bool isMasked() const {
    return isStore() ? getNumOperands() == 3 : getNumOperands() == 2;
  }

public:
  /// Create a recipe widening \p Load, reading from \p Addr under \p Mask
  /// (nullptr meaning all-one mask).
  VPWidenMemoryInstructionRecipe(LoadInst &Load, VPValue *Addr, VPValue *Mask,
                                 bool Consecutive, bool Reverse)
      : VPRecipeBase(VPWidenMemoryInstructionSC, {Addr}),
        VPValue(VPValue::VPVMemoryInstructionSC, &Load, this), Ingredient(Load),
        Consecutive(Consecutive), Reverse(Reverse) {
    assert((Consecutive || !Reverse) && "Reverse implies consecutive");
    setMask(Mask);
  }

  /// Create a recipe widening \p Store, writing \p StoredValue to \p Addr
  /// under \p Mask (nullptr meaning all-one mask).
  VPWidenMemoryInstructionRecipe(StoreInst &Store, VPValue *Addr,
                                 VPValue *StoredValue, VPValue *Mask,
                                 bool Consecutive, bool Reverse)
      : VPRecipeBase(VPWidenMemoryInstructionSC, {Addr, StoredValue}),
        VPValue(VPValue::VPVMemoryInstructionSC, &Store, this),
        Ingredient(Store), Consecutive(Consecutive), Reverse(Reverse) {
    assert((Consecutive || !Reverse) && "Reverse implies consecutive");
    setMask(Mask);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
  }

  /// Return the address accessed by this recipe.
  VPValue *getAddr() const {
    return getOperand(0); // Address is the 1st, mandatory operand.
  }

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    // Mask is optional and therefore the last operand.
    return isMasked() ? getOperand(getNumOperands() - 1) : nullptr;
  }

  /// Returns true if this recipe is a store.
  bool isStore() const { return isa<StoreInst>(Ingredient); }

  /// Return the value stored by this recipe.
  VPValue *getStoredValue() const {
    assert(isStore() && "Stored value only available for store instructions");
    return getOperand(1); // Stored value is the 2nd, mandatory operand.
  }

  // Return whether the loaded-from / stored-to addresses are consecutive.
  bool isConsecutive() const { return Consecutive; }

  // Return whether the consecutive loaded/stored addresses are in reverse
  // order.
  bool isReverse() const { return Reverse; }

  /// Generate the wide load/store.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns true if the recipe only uses the first lane of operand \p Op.
  bool onlyFirstLaneUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");

    // Widened, consecutive memory operations only demand the first lane of
    // their address.
    return Op == getAddr() && isConsecutive();
  }
};
1728 
1729 /// Recipe to expand a SCEV expression.
1730 /// TODO: Currently the recipe always expands the expression in the loop
1731 /// pre-header, but the recipe is currently placed in the header; place it in
1732 /// the pre-header once the latter is modeled in VPlan as a VPBasicBlock.
class VPExpandSCEVRecipe : public VPRecipeBase, public VPValue {
  /// The SCEV expression to expand.
  const SCEV *Expr;
  ScalarEvolution &SE;

public:
  VPExpandSCEVRecipe(const SCEV *Expr, ScalarEvolution &SE)
      : VPRecipeBase(VPExpandSCEVSC, {}), VPValue(nullptr, this), Expr(Expr),
        SE(SE) {}

  ~VPExpandSCEVRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPExpandSCEVSC;
  }

  /// Generate a value by expanding the SCEV expression.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1758 
1759 /// Canonical scalar induction phi of the vector loop. Starting at the specified
1760 /// start value (either 0 or the resume value when vectorizing the epilogue
1761 /// loop). VPWidenCanonicalIVRecipe represents the vector version of the
1762 /// canonical induction variable.
class VPCanonicalIVPHIRecipe : public VPHeaderPHIRecipe {
  /// Debug location to attach to this recipe.
  DebugLoc DL;

public:
  VPCanonicalIVPHIRecipe(VPValue *StartV, DebugLoc DL)
      : VPHeaderPHIRecipe(VPValue::VPVCanonicalIVPHISC, VPCanonicalIVPHISC,
                          nullptr, StartV),
        DL(DL) {}

  ~VPCanonicalIVPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPCanonicalIVPHISC;
  }
  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPHeaderPHIRecipe *D) {
    return D->getVPDefID() == VPCanonicalIVPHISC;
  }
  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVCanonicalIVPHISC;
  }

  /// Generate the canonical scalar induction phi of the vector loop.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the scalar type of the induction, i.e. the type of the live-in
  /// start value (operand 0).
  const Type *getScalarType() const {
    return getOperand(0)->getLiveInIRValue()->getType();
  }

  /// Returns true if the recipe only uses the first lane of operand \p Op.
  bool onlyFirstLaneUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    return true;
  }
};
1806 
1807 /// A Recipe for widening the canonical induction variable of the vector loop.
class VPWidenCanonicalIVRecipe : public VPRecipeBase, public VPValue {
public:
  /// Create a recipe widening \p CanonicalIV, which becomes operand 0.
  VPWidenCanonicalIVRecipe(VPCanonicalIVPHIRecipe *CanonicalIV)
      : VPRecipeBase(VPWidenCanonicalIVSC, {CanonicalIV}),
        VPValue(VPValue::VPVWidenCanonicalIVSC, nullptr, this) {}

  ~VPWidenCanonicalIVRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
  }

  /// Extra classof implementations to allow directly casting from VPUser ->
  /// VPWidenCanonicalIVRecipe.
  static inline bool classof(const VPUser *U) {
    auto *R = dyn_cast<VPRecipeBase>(U);
    return R && R->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
  }
  static inline bool classof(const VPRecipeBase *R) {
    return R->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
  }

  /// Generate a canonical vector induction variable of the vector loop, with
  /// start = {<Part*VF, Part*VF+1, ..., Part*VF+VF-1> for 0 <= Part < UF}, and
  /// step = <VF*UF, VF*UF, ..., VF*UF>.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the scalar type of the induction, taken from the underlying
  /// canonical IV phi recipe (operand 0).
  const Type *getScalarType() const {
    return cast<VPCanonicalIVPHIRecipe>(getOperand(0)->getDef())
        ->getScalarType();
  }
};
1848 
1849 /// A recipe for handling phi nodes of integer and floating-point inductions,
1850 /// producing their scalar values.
class VPScalarIVStepsRecipe : public VPRecipeBase, public VPValue {
  /// Scalar type to use for the generated values.
  Type *Ty;
  /// If not nullptr, truncate the generated values to TruncToTy.
  Type *TruncToTy;
  const InductionDescriptor &IndDesc;

public:
  /// Create a recipe producing scalar steps. Operands are, in order: the
  /// canonical IV (0), the start value (1) and the step (2).
  VPScalarIVStepsRecipe(Type *Ty, const InductionDescriptor &IndDesc,
                        VPValue *CanonicalIV, VPValue *Start, VPValue *Step,
                        Type *TruncToTy)
      : VPRecipeBase(VPScalarIVStepsSC, {CanonicalIV, Start, Step}),
        VPValue(nullptr, this), Ty(Ty), TruncToTy(TruncToTy), IndDesc(IndDesc) {
  }

  ~VPScalarIVStepsRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPScalarIVStepsSC;
  }
  /// Extra classof implementations to allow directly casting from VPUser ->
  /// VPScalarIVStepsRecipe.
  static inline bool classof(const VPUser *U) {
    auto *R = dyn_cast<VPRecipeBase>(U);
    return R && R->getVPDefID() == VPRecipeBase::VPScalarIVStepsSC;
  }
  static inline bool classof(const VPRecipeBase *R) {
    return R->getVPDefID() == VPRecipeBase::VPScalarIVStepsSC;
  }

  /// Generate the scalarized versions of the phi node as needed by their users.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns true if the induction is canonical, i.e. starting at 0 and
  /// incremented by UF * VF (= the original IV is incremented by 1).
  bool isCanonical() const;

  /// Returns the canonical IV recipe (operand 0).
  VPCanonicalIVPHIRecipe *getCanonicalIV() const;
  VPValue *getStartValue() const { return getOperand(1); }
  VPValue *getStepValue() const { return getOperand(2); }
};
1899 
/// VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph. It
/// holds a sequence of zero or more VPRecipe's each representing a sequence of
/// output IR instructions. All PHI-like recipes must come before any non-PHI
/// recipes.
class VPBasicBlock : public VPBlockBase {
public:
  using RecipeListTy = iplist<VPRecipeBase>;

private:
  /// The VPRecipes held in the order of output instructions to generate.
  RecipeListTy Recipes;

public:
  /// Create a VPBasicBlock named \p Name, optionally holding a single initial
  /// \p Recipe.
  VPBasicBlock(const Twine &Name = "", VPRecipeBase *Recipe = nullptr)
      : VPBlockBase(VPBasicBlockSC, Name.str()) {
    if (Recipe)
      appendRecipe(Recipe);
  }

  ~VPBasicBlock() override {
    // Remove (and thereby destroy) all recipes held by this block.
    while (!Recipes.empty())
      Recipes.pop_back();
  }

  /// Instruction iterators...
  using iterator = RecipeListTy::iterator;
  using const_iterator = RecipeListTy::const_iterator;
  using reverse_iterator = RecipeListTy::reverse_iterator;
  using const_reverse_iterator = RecipeListTy::const_reverse_iterator;

  //===--------------------------------------------------------------------===//
  /// Recipe iterator methods
  ///
  inline iterator begin() { return Recipes.begin(); }
  inline const_iterator begin() const { return Recipes.begin(); }
  inline iterator end() { return Recipes.end(); }
  inline const_iterator end() const { return Recipes.end(); }

  inline reverse_iterator rbegin() { return Recipes.rbegin(); }
  inline const_reverse_iterator rbegin() const { return Recipes.rbegin(); }
  inline reverse_iterator rend() { return Recipes.rend(); }
  inline const_reverse_iterator rend() const { return Recipes.rend(); }

  inline size_t size() const { return Recipes.size(); }
  inline bool empty() const { return Recipes.empty(); }
  inline const VPRecipeBase &front() const { return Recipes.front(); }
  inline VPRecipeBase &front() { return Recipes.front(); }
  inline const VPRecipeBase &back() const { return Recipes.back(); }
  inline VPRecipeBase &back() { return Recipes.back(); }

  /// Returns a reference to the list of recipes.
  RecipeListTy &getRecipeList() { return Recipes; }

  /// Returns a pointer to a member of the recipe list.
  static RecipeListTy VPBasicBlock::*getSublistAccess(VPRecipeBase *) {
    return &VPBasicBlock::Recipes;
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPBlockBase *V) {
    return V->getVPBlockID() == VPBlockBase::VPBasicBlockSC;
  }

  /// Insert \p Recipe into this block before \p InsertPt, taking ownership.
  /// \p Recipe must not already belong to a block.
  void insert(VPRecipeBase *Recipe, iterator InsertPt) {
    assert(Recipe && "No recipe to append.");
    assert(!Recipe->Parent && "Recipe already in VPlan");
    Recipe->Parent = this;
    Recipes.insert(InsertPt, Recipe);
  }

  /// Augment the existing recipes of a VPBasicBlock with an additional
  /// \p Recipe as the last recipe.
  void appendRecipe(VPRecipeBase *Recipe) { insert(Recipe, end()); }

  /// The method which generates the output IR instructions that correspond to
  /// this VPBasicBlock, thereby "executing" the VPlan.
  void execute(struct VPTransformState *State) override;

  /// Return the position of the first non-phi node recipe in the block.
  iterator getFirstNonPhi();

  /// Returns an iterator range over the PHI-like recipes in the block.
  iterator_range<iterator> phis() {
    return make_range(begin(), getFirstNonPhi());
  }

  void dropAllReferences(VPValue *NewValue) override;

  /// Split current block at \p SplitAt by inserting a new block between the
  /// current block and its successors and moving all recipes starting at
  /// SplitAt to the new block. Returns the new block.
  VPBasicBlock *splitAt(iterator SplitAt);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print this VPBasicBlock to \p O, prefixing all lines with \p Indent. \p
  /// SlotTracker is used to print unnamed VPValue's using consecutive numbers.
  ///
  /// Note that the numbering is applied to the whole VPlan, so printing
  /// individual blocks is consistent with the whole VPlan printing.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
  using VPBlockBase::print; // Get the print(raw_stream &O) version.
#endif

private:
  /// Create an IR BasicBlock to hold the output instructions generated by this
  /// VPBasicBlock, and return it. Update the CFGState accordingly.
  BasicBlock *createEmptyBasicBlock(VPTransformState::CFGState &CFG);
};
2008 
2009 /// VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks
2010 /// which form a Single-Entry-Single-Exit subgraph of the output IR CFG.
2011 /// A VPRegionBlock may indicate that its contents are to be replicated several
2012 /// times. This is designed to support predicated scalarization, in which a
2013 /// scalar if-then code structure needs to be generated VF * UF times. Having
2014 /// this replication indicator helps to keep a single model for multiple
2015 /// candidate VF's. The actual replication takes place only once the desired VF
2016 /// and UF have been determined.
class VPRegionBlock : public VPBlockBase {
  /// Hold the Single Entry of the SESE region modelled by the VPRegionBlock.
  VPBlockBase *Entry;

  /// Hold the Single Exit of the SESE region modelled by the VPRegionBlock.
  VPBlockBase *Exit;

  /// An indicator whether this region is to generate multiple replicated
  /// instances of output IR corresponding to its VPBlockBases.
  bool IsReplicator;

public:
  /// Create a region with the given \p Entry and \p Exit blocks; both must be
  /// free of predecessors/successors respectively, and become parented here.
  VPRegionBlock(VPBlockBase *Entry, VPBlockBase *Exit,
                const std::string &Name = "", bool IsReplicator = false)
      : VPBlockBase(VPRegionBlockSC, Name), Entry(Entry), Exit(Exit),
        IsReplicator(IsReplicator) {
    assert(Entry->getPredecessors().empty() && "Entry block has predecessors.");
    assert(Exit->getSuccessors().empty() && "Exit block has successors.");
    Entry->setParent(this);
    Exit->setParent(this);
  }
  /// Create an empty region; entry and exit must be set later.
  VPRegionBlock(const std::string &Name = "", bool IsReplicator = false)
      : VPBlockBase(VPRegionBlockSC, Name), Entry(nullptr), Exit(nullptr),
        IsReplicator(IsReplicator) {}

  ~VPRegionBlock() override {
    if (Entry) {
      // Replace all dangling uses with a placeholder before deleting the CFG.
      VPValue DummyValue;
      Entry->dropAllReferences(&DummyValue);
      deleteCFG(Entry);
    }
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPBlockBase *V) {
    return V->getVPBlockID() == VPBlockBase::VPRegionBlockSC;
  }

  const VPBlockBase *getEntry() const { return Entry; }
  VPBlockBase *getEntry() { return Entry; }

  /// Set \p EntryBlock as the entry VPBlockBase of this VPRegionBlock. \p
  /// EntryBlock must have no predecessors.
  void setEntry(VPBlockBase *EntryBlock) {
    assert(EntryBlock->getPredecessors().empty() &&
           "Entry block cannot have predecessors.");
    Entry = EntryBlock;
    EntryBlock->setParent(this);
  }

  // FIXME: DominatorTreeBase is doing 'A->getParent()->front()'. 'front' is a
  // specific interface of llvm::Function, instead of using
  // GraphTraints::getEntryNode. We should add a new template parameter to
  // DominatorTreeBase representing the Graph type.
  VPBlockBase &front() const { return *Entry; }

  const VPBlockBase *getExit() const { return Exit; }
  VPBlockBase *getExit() { return Exit; }

  /// Set \p ExitBlock as the exit VPBlockBase of this VPRegionBlock. \p
  /// ExitBlock must have no successors.
  void setExit(VPBlockBase *ExitBlock) {
    assert(ExitBlock->getSuccessors().empty() &&
           "Exit block cannot have successors.");
    Exit = ExitBlock;
    ExitBlock->setParent(this);
  }

  /// An indicator whether this region is to generate multiple replicated
  /// instances of output IR corresponding to its VPBlockBases.
  bool isReplicator() const { return IsReplicator; }

  /// The method which generates the output IR instructions that correspond to
  /// this VPRegionBlock, thereby "executing" the VPlan.
  void execute(struct VPTransformState *State) override;

  void dropAllReferences(VPValue *NewValue) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print this VPRegionBlock to \p O (recursively), prefixing all lines with
  /// \p Indent. \p SlotTracker is used to print unnamed VPValue's using
  /// consecutive numbers.
  ///
  /// Note that the numbering is applied to the whole VPlan, so printing
  /// individual regions is consistent with the whole VPlan printing.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
  using VPBlockBase::print; // Get the print(raw_stream &O) version.
#endif
};
2107 
2108 //===----------------------------------------------------------------------===//
2109 // GraphTraits specializations for VPlan Hierarchical Control-Flow Graphs     //
2110 //===----------------------------------------------------------------------===//
2111 
2112 // The following set of template specializations implement GraphTraits to treat
2113 // any VPBlockBase as a node in a graph of VPBlockBases. It's important to note
2114 // that VPBlockBase traits don't recurse into VPRegioBlocks, i.e., if the
2115 // VPBlockBase is a VPRegionBlock, this specialization provides access to its
2116 // successors/predecessors but not to the blocks inside the region.
2117 
// Treat a VPBlockBase as a graph node whose children are its successors.
template <> struct GraphTraits<VPBlockBase *> {
  using NodeRef = VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }

  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getSuccessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getSuccessors().end();
  }
};
2132 
// Const variant of the VPBlockBase GraphTraits specialization above.
template <> struct GraphTraits<const VPBlockBase *> {
  using NodeRef = const VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::const_iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }

  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getSuccessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getSuccessors().end();
  }
};
2147 
2148 // Inverse order specialization for VPBasicBlocks. Predecessors are used instead
2149 // of successors for the inverse traversal.
template <> struct GraphTraits<Inverse<VPBlockBase *>> {
  using NodeRef = VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;

  static NodeRef getEntryNode(Inverse<NodeRef> B) { return B.Graph; }

  // Children of an inverse node are its predecessors.
  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getPredecessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getPredecessors().end();
  }
};
2164 
2165 // The following set of template specializations implement GraphTraits to
2166 // treat VPRegionBlock as a graph and recurse inside its nodes. It's important
2167 // to note that the blocks inside the VPRegionBlock are treated as VPBlockBases
2168 // (i.e., no dyn_cast is performed, VPBlockBases specialization is used), so
2169 // there won't be automatic recursion into other VPBlockBases that turn to be
2170 // VPRegionBlocks.
2171 
// Treat a VPRegionBlock as a graph rooted at its entry block; node iteration
// performs a depth-first traversal from the entry.
template <>
struct GraphTraits<VPRegionBlock *> : public GraphTraits<VPBlockBase *> {
  using GraphRef = VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getEntry());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
2189 
// Const variant of the VPRegionBlock GraphTraits specialization above.
template <>
struct GraphTraits<const VPRegionBlock *>
    : public GraphTraits<const VPBlockBase *> {
  using GraphRef = const VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getEntry());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
2208 
// Inverse variant: traverse a VPRegionBlock backwards, rooted at its exit
// block and following predecessors.
template <>
struct GraphTraits<Inverse<VPRegionBlock *>>
    : public GraphTraits<Inverse<VPBlockBase *>> {
  using GraphRef = VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  static NodeRef getEntryNode(Inverse<GraphRef> N) {
    return N.Graph->getExit();
  }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getExit());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
2229 
2230 /// Iterator to traverse all successors of a VPBlockBase node. This includes the
2231 /// entry node of VPRegionBlocks. Exit blocks of a region implicitly have their
2232 /// parent region's successors. This ensures all blocks in a region are visited
2233 /// before any blocks in a successor region when doing a reverse post-order
/// traversal of the graph.
2235 template <typename BlockPtrTy>
2236 class VPAllSuccessorsIterator
2237     : public iterator_facade_base<VPAllSuccessorsIterator<BlockPtrTy>,
2238                                   std::forward_iterator_tag, VPBlockBase> {
2239   BlockPtrTy Block;
2240   /// Index of the current successor. For VPBasicBlock nodes, this simply is the
2241   /// index for the successor array. For VPRegionBlock, SuccessorIdx == 0 is
2242   /// used for the region's entry block, and SuccessorIdx - 1 are the indices
2243   /// for the successor array.
2244   size_t SuccessorIdx;
2245 
2246   static BlockPtrTy getBlockWithSuccs(BlockPtrTy Current) {
2247     while (Current && Current->getNumSuccessors() == 0)
2248       Current = Current->getParent();
2249     return Current;
2250   }
2251 
2252   /// Templated helper to dereference successor \p SuccIdx of \p Block. Used by
2253   /// both the const and non-const operator* implementations.
2254   template <typename T1> static T1 deref(T1 Block, unsigned SuccIdx) {
2255     if (auto *R = dyn_cast<VPRegionBlock>(Block)) {
2256       if (SuccIdx == 0)
2257         return R->getEntry();
2258       SuccIdx--;
2259     }
2260 
2261     // For exit blocks, use the next parent region with successors.
2262     return getBlockWithSuccs(Block)->getSuccessors()[SuccIdx];
2263   }
2264 
2265 public:
2266   VPAllSuccessorsIterator(BlockPtrTy Block, size_t Idx = 0)
2267       : Block(Block), SuccessorIdx(Idx) {}
2268   VPAllSuccessorsIterator(const VPAllSuccessorsIterator &Other)
2269       : Block(Other.Block), SuccessorIdx(Other.SuccessorIdx) {}
2270 
2271   VPAllSuccessorsIterator &operator=(const VPAllSuccessorsIterator &R) {
2272     Block = R.Block;
2273     SuccessorIdx = R.SuccessorIdx;
2274     return *this;
2275   }
2276 
2277   static VPAllSuccessorsIterator end(BlockPtrTy Block) {
2278     BlockPtrTy ParentWithSuccs = getBlockWithSuccs(Block);
2279     unsigned NumSuccessors = ParentWithSuccs
2280                                  ? ParentWithSuccs->getNumSuccessors()
2281                                  : Block->getNumSuccessors();
2282 
2283     if (auto *R = dyn_cast<VPRegionBlock>(Block))
2284       return {R, NumSuccessors + 1};
2285     return {Block, NumSuccessors};
2286   }
2287 
2288   bool operator==(const VPAllSuccessorsIterator &R) const {
2289     return Block == R.Block && SuccessorIdx == R.SuccessorIdx;
2290   }
2291 
2292   const VPBlockBase *operator*() const { return deref(Block, SuccessorIdx); }
2293 
2294   BlockPtrTy operator*() { return deref(Block, SuccessorIdx); }
2295 
2296   VPAllSuccessorsIterator &operator++() {
2297     SuccessorIdx++;
2298     return *this;
2299   }
2300 
2301   VPAllSuccessorsIterator operator++(int X) {
2302     VPAllSuccessorsIterator Orig = *this;
2303     SuccessorIdx++;
2304     return Orig;
2305   }
2306 };
2307 
/// Helper for GraphTraits specialization that traverses through VPRegionBlocks.
template <typename BlockTy> class VPBlockRecursiveTraversalWrapper {
  // Block at which the recursive traversal starts.
  BlockTy Entry;

public:
  VPBlockRecursiveTraversalWrapper(BlockTy Entry) : Entry(Entry) {}
  /// Returns the block the traversal starts at.
  BlockTy getEntry() { return Entry; }
};
2316 
/// GraphTraits specialization to recursively traverse VPBlockBase nodes,
/// including traversing through VPRegionBlocks.  Exit blocks of a region
/// implicitly have their parent region's successors. This ensures all blocks in
/// a region are visited before any blocks in a successor region when doing a
/// reverse post-order traversal of the graph.
template <>
struct GraphTraits<VPBlockRecursiveTraversalWrapper<VPBlockBase *>> {
  using NodeRef = VPBlockBase *;
  using ChildIteratorType = VPAllSuccessorsIterator<VPBlockBase *>;

  /// The traversal starts at the wrapped entry block.
  static NodeRef
  getEntryNode(VPBlockRecursiveTraversalWrapper<VPBlockBase *> N) {
    return N.getEntry();
  }

  /// Children are all successors, including ones implied by regions.
  static inline ChildIteratorType child_begin(NodeRef N) {
    return ChildIteratorType(N);
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return ChildIteratorType::end(N);
  }
};
2340 
/// Const variant of the recursive-traversal GraphTraits specialization for
/// VPBlockBase graphs.
template <>
struct GraphTraits<VPBlockRecursiveTraversalWrapper<const VPBlockBase *>> {
  using NodeRef = const VPBlockBase *;
  using ChildIteratorType = VPAllSuccessorsIterator<const VPBlockBase *>;

  /// The traversal starts at the wrapped entry block.
  static NodeRef
  getEntryNode(VPBlockRecursiveTraversalWrapper<const VPBlockBase *> N) {
    return N.getEntry();
  }

  /// Children are all successors, including ones implied by regions.
  static inline ChildIteratorType child_begin(NodeRef N) {
    return ChildIteratorType(N);
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return ChildIteratorType::end(N);
  }
};
2359 
2360 /// VPlan models a candidate for vectorization, encoding various decisions take
2361 /// to produce efficient output IR, including which branches, basic-blocks and
2362 /// output IR instructions to generate, and their cost. VPlan holds a
2363 /// Hierarchical-CFG of VPBasicBlocks and VPRegionBlocks rooted at an Entry
2364 /// VPBlock.
2365 class VPlan {
2366   friend class VPlanPrinter;
2367   friend class VPSlotTracker;
2368 
2369   /// Hold the single entry to the Hierarchical CFG of the VPlan.
2370   VPBlockBase *Entry;
2371 
2372   /// Holds the VFs applicable to this VPlan.
2373   SmallSetVector<ElementCount, 2> VFs;
2374 
2375   /// Holds the name of the VPlan, for printing.
2376   std::string Name;
2377 
2378   /// Holds all the external definitions created for this VPlan.
2379   // TODO: Introduce a specific representation for external definitions in
2380   // VPlan. External definitions must be immutable and hold a pointer to its
2381   // underlying IR that will be used to implement its structural comparison
2382   // (operators '==' and '<').
2383   SetVector<VPValue *> VPExternalDefs;
2384 
2385   /// Represents the trip count of the original loop, for folding
2386   /// the tail.
2387   VPValue *TripCount = nullptr;
2388 
2389   /// Represents the backedge taken count of the original loop, for folding
2390   /// the tail. It equals TripCount - 1.
2391   VPValue *BackedgeTakenCount = nullptr;
2392 
2393   /// Represents the vector trip count.
2394   VPValue VectorTripCount;
2395 
2396   /// Holds a mapping between Values and their corresponding VPValue inside
2397   /// VPlan.
2398   Value2VPValueTy Value2VPValue;
2399 
2400   /// Contains all VPValues that been allocated by addVPValue directly and need
2401   /// to be free when the plan's destructor is called.
2402   SmallVector<VPValue *, 16> VPValuesToFree;
2403 
2404   /// Holds the VPLoopInfo analysis for this VPlan.
2405   VPLoopInfo VPLInfo;
2406 
2407   /// Indicates whether it is safe use the Value2VPValue mapping or if the
2408   /// mapping cannot be used any longer, because it is stale.
2409   bool Value2VPValueEnabled = true;
2410 
2411 public:
2412   VPlan(VPBlockBase *Entry = nullptr) : Entry(Entry) {
2413     if (Entry)
2414       Entry->setPlan(this);
2415   }
2416 
2417   ~VPlan() {
2418     if (Entry) {
2419       VPValue DummyValue;
2420       for (VPBlockBase *Block : depth_first(Entry))
2421         Block->dropAllReferences(&DummyValue);
2422 
2423       VPBlockBase::deleteCFG(Entry);
2424     }
2425     for (VPValue *VPV : VPValuesToFree)
2426       delete VPV;
2427     if (TripCount)
2428       delete TripCount;
2429     if (BackedgeTakenCount)
2430       delete BackedgeTakenCount;
2431     for (VPValue *Def : VPExternalDefs)
2432       delete Def;
2433   }
2434 
2435   /// Prepare the plan for execution, setting up the required live-in values.
2436   void prepareToExecute(Value *TripCount, Value *VectorTripCount,
2437                         Value *CanonicalIVStartValue, VPTransformState &State);
2438 
2439   /// Generate the IR code for this VPlan.
2440   void execute(struct VPTransformState *State);
2441 
2442   VPBlockBase *getEntry() { return Entry; }
2443   const VPBlockBase *getEntry() const { return Entry; }
2444 
2445   VPBlockBase *setEntry(VPBlockBase *Block) {
2446     Entry = Block;
2447     Block->setPlan(this);
2448     return Entry;
2449   }
2450 
2451   /// The trip count of the original loop.
2452   VPValue *getOrCreateTripCount() {
2453     if (!TripCount)
2454       TripCount = new VPValue();
2455     return TripCount;
2456   }
2457 
2458   /// The backedge taken count of the original loop.
2459   VPValue *getOrCreateBackedgeTakenCount() {
2460     if (!BackedgeTakenCount)
2461       BackedgeTakenCount = new VPValue();
2462     return BackedgeTakenCount;
2463   }
2464 
2465   /// The vector trip count.
2466   VPValue &getVectorTripCount() { return VectorTripCount; }
2467 
2468   /// Mark the plan to indicate that using Value2VPValue is not safe any
2469   /// longer, because it may be stale.
2470   void disableValue2VPValue() { Value2VPValueEnabled = false; }
2471 
2472   void addVF(ElementCount VF) { VFs.insert(VF); }
2473 
2474   bool hasVF(ElementCount VF) { return VFs.count(VF); }
2475 
2476   const std::string &getName() const { return Name; }
2477 
2478   void setName(const Twine &newName) { Name = newName.str(); }
2479 
2480   /// Add \p VPVal to the pool of external definitions if it's not already
2481   /// in the pool.
2482   void addExternalDef(VPValue *VPVal) { VPExternalDefs.insert(VPVal); }
2483 
2484   void addVPValue(Value *V) {
2485     assert(Value2VPValueEnabled &&
2486            "IR value to VPValue mapping may be out of date!");
2487     assert(V && "Trying to add a null Value to VPlan");
2488     assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
2489     VPValue *VPV = new VPValue(V);
2490     Value2VPValue[V] = VPV;
2491     VPValuesToFree.push_back(VPV);
2492   }
2493 
2494   void addVPValue(Value *V, VPValue *VPV) {
2495     assert(Value2VPValueEnabled && "Value2VPValue mapping may be out of date!");
2496     assert(V && "Trying to add a null Value to VPlan");
2497     assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
2498     Value2VPValue[V] = VPV;
2499   }
2500 
2501   /// Returns the VPValue for \p V. \p OverrideAllowed can be used to disable
2502   /// checking whether it is safe to query VPValues using IR Values.
2503   VPValue *getVPValue(Value *V, bool OverrideAllowed = false) {
2504     assert((OverrideAllowed || isa<Constant>(V) || Value2VPValueEnabled) &&
2505            "Value2VPValue mapping may be out of date!");
2506     assert(V && "Trying to get the VPValue of a null Value");
2507     assert(Value2VPValue.count(V) && "Value does not exist in VPlan");
2508     return Value2VPValue[V];
2509   }
2510 
2511   /// Gets the VPValue or adds a new one (if none exists yet) for \p V. \p
2512   /// OverrideAllowed can be used to disable checking whether it is safe to
2513   /// query VPValues using IR Values.
2514   VPValue *getOrAddVPValue(Value *V, bool OverrideAllowed = false) {
2515     assert((OverrideAllowed || isa<Constant>(V) || Value2VPValueEnabled) &&
2516            "Value2VPValue mapping may be out of date!");
2517     assert(V && "Trying to get or add the VPValue of a null Value");
2518     if (!Value2VPValue.count(V))
2519       addVPValue(V);
2520     return getVPValue(V);
2521   }
2522 
2523   void removeVPValueFor(Value *V) {
2524     assert(Value2VPValueEnabled &&
2525            "IR value to VPValue mapping may be out of date!");
2526     Value2VPValue.erase(V);
2527   }
2528 
2529   /// Return the VPLoopInfo analysis for this VPlan.
2530   VPLoopInfo &getVPLoopInfo() { return VPLInfo; }
2531   const VPLoopInfo &getVPLoopInfo() const { return VPLInfo; }
2532 
2533 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2534   /// Print this VPlan to \p O.
2535   void print(raw_ostream &O) const;
2536 
2537   /// Print this VPlan in DOT format to \p O.
2538   void printDOT(raw_ostream &O) const;
2539 
2540   /// Dump the plan to stderr (for debugging).
2541   LLVM_DUMP_METHOD void dump() const;
2542 #endif
2543 
2544   /// Returns a range mapping the values the range \p Operands to their
2545   /// corresponding VPValues.
2546   iterator_range<mapped_iterator<Use *, std::function<VPValue *(Value *)>>>
2547   mapToVPValues(User::op_range Operands) {
2548     std::function<VPValue *(Value *)> Fn = [this](Value *Op) {
2549       return getOrAddVPValue(Op);
2550     };
2551     return map_range(Operands, Fn);
2552   }
2553 
2554   /// Returns true if \p VPV is uniform after vectorization.
2555   bool isUniformAfterVectorization(VPValue *VPV) const {
2556     auto RepR = dyn_cast_or_null<VPReplicateRecipe>(VPV->getDef());
2557     return !VPV->getDef() || (RepR && RepR->isUniform());
2558   }
2559 
2560   /// Returns the VPRegionBlock of the vector loop.
2561   VPRegionBlock *getVectorLoopRegion() {
2562     return cast<VPRegionBlock>(getEntry());
2563   }
2564 
2565   /// Returns the canonical induction recipe of the vector loop.
2566   VPCanonicalIVPHIRecipe *getCanonicalIV() {
2567     VPBasicBlock *EntryVPBB = getVectorLoopRegion()->getEntryBasicBlock();
2568     if (EntryVPBB->empty()) {
2569       // VPlan native path.
2570       EntryVPBB = cast<VPBasicBlock>(EntryVPBB->getSingleSuccessor());
2571     }
2572     return cast<VPCanonicalIVPHIRecipe>(&*EntryVPBB->begin());
2573   }
2574 
2575 private:
2576   /// Add to the given dominator tree the header block and every new basic block
2577   /// that was created between it and the latch block, inclusive.
2578   static void updateDominatorTree(DominatorTree *DT, BasicBlock *LoopLatchBB,
2579                                   BasicBlock *LoopPreHeaderBB,
2580                                   BasicBlock *LoopExitBB);
2581 };
2582 
2583 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2584 /// VPlanPrinter prints a given VPlan to a given output stream. The printing is
2585 /// indented and follows the dot format.
2586 class VPlanPrinter {
2587   raw_ostream &OS;
2588   const VPlan &Plan;
2589   unsigned Depth = 0;
2590   unsigned TabWidth = 2;
2591   std::string Indent;
2592   unsigned BID = 0;
2593   SmallDenseMap<const VPBlockBase *, unsigned> BlockID;
2594 
2595   VPSlotTracker SlotTracker;
2596 
2597   /// Handle indentation.
2598   void bumpIndent(int b) { Indent = std::string((Depth += b) * TabWidth, ' '); }
2599 
2600   /// Print a given \p Block of the Plan.
2601   void dumpBlock(const VPBlockBase *Block);
2602 
2603   /// Print the information related to the CFG edges going out of a given
2604   /// \p Block, followed by printing the successor blocks themselves.
2605   void dumpEdges(const VPBlockBase *Block);
2606 
2607   /// Print a given \p BasicBlock, including its VPRecipes, followed by printing
2608   /// its successor blocks.
2609   void dumpBasicBlock(const VPBasicBlock *BasicBlock);
2610 
2611   /// Print a given \p Region of the Plan.
2612   void dumpRegion(const VPRegionBlock *Region);
2613 
2614   unsigned getOrCreateBID(const VPBlockBase *Block) {
2615     return BlockID.count(Block) ? BlockID[Block] : BlockID[Block] = BID++;
2616   }
2617 
2618   Twine getOrCreateName(const VPBlockBase *Block);
2619 
2620   Twine getUID(const VPBlockBase *Block);
2621 
2622   /// Print the information related to a CFG edge between two VPBlockBases.
2623   void drawEdge(const VPBlockBase *From, const VPBlockBase *To, bool Hidden,
2624                 const Twine &Label);
2625 
2626 public:
2627   VPlanPrinter(raw_ostream &O, const VPlan &P)
2628       : OS(O), Plan(P), SlotTracker(&P) {}
2629 
2630   LLVM_DUMP_METHOD void dump();
2631 };
2632 
/// Lightweight wrapper around an IR Value for printing it with VPlan
/// conventions (see the operator<< overload below in this file).
struct VPlanIngredient {
  // Wrapped IR value; not owned.
  const Value *V;

  VPlanIngredient(const Value *V) : V(V) {}

  /// Print the wrapped value to \p O.
  void print(raw_ostream &O) const;
};
2640 
/// Stream a VPlanIngredient to \p OS via its print method.
inline raw_ostream &operator<<(raw_ostream &OS, const VPlanIngredient &I) {
  I.print(OS);
  return OS;
}

/// Stream a VPlan to \p OS via its print method.
inline raw_ostream &operator<<(raw_ostream &OS, const VPlan &Plan) {
  Plan.print(OS);
  return OS;
}
2650 #endif
2651 
2652 //===----------------------------------------------------------------------===//
2653 // VPlan Utilities
2654 //===----------------------------------------------------------------------===//
2655 
/// Class that provides utilities for VPBlockBases in VPlan.
class VPBlockUtils {
public:
  VPBlockUtils() = delete;

  /// Insert disconnected VPBlockBase \p NewBlock after \p BlockPtr. Add \p
  /// NewBlock as successor of \p BlockPtr and \p BlockPtr as predecessor of \p
  /// NewBlock, and propagate \p BlockPtr parent to \p NewBlock. \p BlockPtr's
  /// successors are moved from \p BlockPtr to \p NewBlock and \p BlockPtr's
  /// conditional bit is propagated to \p NewBlock. \p NewBlock must have
  /// neither successors nor predecessors.
  static void insertBlockAfter(VPBlockBase *NewBlock, VPBlockBase *BlockPtr) {
    assert(NewBlock->getSuccessors().empty() &&
           NewBlock->getPredecessors().empty() &&
           "Can't insert new block with predecessors or successors.");
    NewBlock->setParent(BlockPtr->getParent());
    // Copy the successor list first: disconnectBlocks mutates it.
    SmallVector<VPBlockBase *> Succs(BlockPtr->successors());
    for (VPBlockBase *Succ : Succs) {
      disconnectBlocks(BlockPtr, Succ);
      connectBlocks(NewBlock, Succ);
    }
    // The branch condition moves along with the successors it selects among.
    NewBlock->setCondBit(BlockPtr->getCondBit());
    BlockPtr->setCondBit(nullptr);
    connectBlocks(BlockPtr, NewBlock);
  }

  /// Insert disconnected VPBlockBases \p IfTrue and \p IfFalse after \p
  /// BlockPtr. Add \p IfTrue and \p IfFalse as successors of \p BlockPtr and
  /// \p BlockPtr as predecessor of \p IfTrue and \p IfFalse. Propagate \p
  /// BlockPtr parent to \p IfTrue and \p IfFalse. \p Condition is set as the
  /// successor selector. \p BlockPtr must have no successors and \p IfTrue and
  /// \p IfFalse must have neither successors nor predecessors.
  static void insertTwoBlocksAfter(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
                                   VPValue *Condition, VPBlockBase *BlockPtr) {
    assert(IfTrue->getSuccessors().empty() &&
           "Can't insert IfTrue with successors.");
    assert(IfFalse->getSuccessors().empty() &&
           "Can't insert IfFalse with successors.");
    BlockPtr->setTwoSuccessors(IfTrue, IfFalse, Condition);
    IfTrue->setPredecessors({BlockPtr});
    IfFalse->setPredecessors({BlockPtr});
    IfTrue->setParent(BlockPtr->getParent());
    IfFalse->setParent(BlockPtr->getParent());
  }

  /// Connect VPBlockBases \p From and \p To bi-directionally. Append \p To to
  /// the successors of \p From and \p From to the predecessors of \p To. Both
  /// VPBlockBases must have the same parent, which can be null. Both
  /// VPBlockBases can be already connected to other VPBlockBases.
  static void connectBlocks(VPBlockBase *From, VPBlockBase *To) {
    assert((From->getParent() == To->getParent()) &&
           "Can't connect two block with different parents");
    assert(From->getNumSuccessors() < 2 &&
           "Blocks can't have more than two successors.");
    From->appendSuccessor(To);
    To->appendPredecessor(From);
  }

  /// Disconnect VPBlockBases \p From and \p To bi-directionally. Remove \p To
  /// from the successors of \p From and \p From from the predecessors of \p To.
  static void disconnectBlocks(VPBlockBase *From, VPBlockBase *To) {
    assert(To && "Successor to disconnect is null.");
    From->removeSuccessor(To);
    To->removePredecessor(From);
  }

  /// Try to merge \p Block into its single predecessor, if \p Block is a
  /// VPBasicBlock and its predecessor has a single successor. Returns a pointer
  /// to the predecessor \p Block was merged into or nullptr otherwise.
  static VPBasicBlock *tryToMergeBlockIntoPredecessor(VPBlockBase *Block) {
    auto *VPBB = dyn_cast<VPBasicBlock>(Block);
    auto *PredVPBB =
        dyn_cast_or_null<VPBasicBlock>(Block->getSinglePredecessor());
    if (!VPBB || !PredVPBB || PredVPBB->getNumSuccessors() != 1)
      return nullptr;

    // Move all recipes into the predecessor, then splice Block out of the CFG.
    for (VPRecipeBase &R : make_early_inc_range(*VPBB))
      R.moveBefore(*PredVPBB, PredVPBB->end());
    VPBlockUtils::disconnectBlocks(PredVPBB, VPBB);
    auto *ParentRegion = cast<VPRegionBlock>(Block->getParent());
    if (ParentRegion->getExit() == Block)
      ParentRegion->setExit(PredVPBB);
    // Copy the successor list first: disconnectBlocks mutates it.
    SmallVector<VPBlockBase *> Successors(Block->successors());
    for (auto *Succ : Successors) {
      VPBlockUtils::disconnectBlocks(Block, Succ);
      VPBlockUtils::connectBlocks(PredVPBB, Succ);
    }
    delete Block;
    return PredVPBB;
  }

  /// Returns true if the edge \p FromBlock -> \p ToBlock is a back-edge.
  static bool isBackEdge(const VPBlockBase *FromBlock,
                         const VPBlockBase *ToBlock, const VPLoopInfo *VPLI) {
    assert(FromBlock->getParent() == ToBlock->getParent() &&
           FromBlock->getParent() && "Must be in same region");
    const VPLoop *FromLoop = VPLI->getLoopFor(FromBlock);
    const VPLoop *ToLoop = VPLI->getLoopFor(ToBlock);
    if (!FromLoop || !ToLoop || FromLoop != ToLoop)
      return false;

    // A back-edge is a branch from the loop latch to its header.
    return ToLoop->isLoopLatch(FromBlock) && ToBlock == ToLoop->getHeader();
  }

  /// Returns true if \p Block is a loop latch
  static bool blockIsLoopLatch(const VPBlockBase *Block,
                               const VPLoopInfo *VPLInfo) {
    if (const VPLoop *ParentVPL = VPLInfo->getLoopFor(Block))
      return ParentVPL->isLoopLatch(Block);

    return false;
  }

  /// Count and return the number of successors of \p PredBlock excluding any
  /// backedges.
  static unsigned countSuccessorsNoBE(VPBlockBase *PredBlock,
                                      VPLoopInfo *VPLI) {
    unsigned Count = 0;
    for (VPBlockBase *SuccBlock : PredBlock->getSuccessors()) {
      if (!VPBlockUtils::isBackEdge(PredBlock, SuccBlock, VPLI))
        Count++;
    }
    return Count;
  }

  /// Return an iterator range over \p Range which only includes \p BlockTy
  /// blocks. The accesses are casted to \p BlockTy.
  template <typename BlockTy, typename T>
  static auto blocksOnly(const T &Range) {
    // Create BaseTy with correct const-ness based on BlockTy.
    using BaseTy =
        typename std::conditional<std::is_const<BlockTy>::value,
                                  const VPBlockBase, VPBlockBase>::type;

    // We need to first create an iterator range over (const) BlockTy & instead
    // of (const) BlockTy * for filter_range to work properly.
    auto Mapped =
        map_range(Range, [](BaseTy *Block) -> BaseTy & { return *Block; });
    auto Filter = make_filter_range(
        Mapped, [](BaseTy &Block) { return isa<BlockTy>(&Block); });
    return map_range(Filter, [](BaseTy &Block) -> BlockTy * {
      return cast<BlockTy>(&Block);
    });
  }
};
2802 
/// Holds VPlan-level interleave groups, mapping each member VPInstruction to
/// the interleave group it belongs to.
class VPInterleavedAccessInfo {
  // Maps a VPInstruction to the interleave group containing it; groups are
  // owned by this class and freed in the destructor.
  DenseMap<VPInstruction *, InterleaveGroup<VPInstruction> *>
      InterleaveGroupMap;

  /// Type for mapping of instruction based interleave groups to VPInstruction
  /// interleave groups
  using Old2NewTy = DenseMap<InterleaveGroup<Instruction> *,
                             InterleaveGroup<VPInstruction> *>;

  /// Recursively traverse \p Region and populate VPlan based interleave groups
  /// based on \p IAI.
  void visitRegion(VPRegionBlock *Region, Old2NewTy &Old2New,
                   InterleavedAccessInfo &IAI);
  /// Recursively traverse \p Block and populate VPlan based interleave groups
  /// based on \p IAI.
  void visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
                  InterleavedAccessInfo &IAI);

public:
  /// Build VPlan-based interleave groups for \p Plan from the IR-based groups
  /// in \p IAI.
  VPInterleavedAccessInfo(VPlan &Plan, InterleavedAccessInfo &IAI);

  ~VPInterleavedAccessInfo() {
    SmallPtrSet<InterleaveGroup<VPInstruction> *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if doesn't have such group.
  InterleaveGroup<VPInstruction> *
  getInterleaveGroup(VPInstruction *Instr) const {
    return InterleaveGroupMap.lookup(Instr);
  }
};
2841 
/// Class that maps (parts of) an existing VPlan to trees of combined
/// VPInstructions.
class VPlanSlp {
  // Result of matching a candidate bundle: combining failed, the bundle is
  // made of loads, or it is matched by a common opcode (see getBest).
  enum class OpMode { Failed, Load, Opcode };

  /// A DenseMapInfo implementation for using SmallVector<VPValue *, 4> as
  /// DenseMap keys.
  struct BundleDenseMapInfo {
    static SmallVector<VPValue *, 4> getEmptyKey() {
      return {reinterpret_cast<VPValue *>(-1)};
    }

    static SmallVector<VPValue *, 4> getTombstoneKey() {
      return {reinterpret_cast<VPValue *>(-2)};
    }

    static unsigned getHashValue(const SmallVector<VPValue *, 4> &V) {
      return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
    }

    static bool isEqual(const SmallVector<VPValue *, 4> &LHS,
                        const SmallVector<VPValue *, 4> &RHS) {
      return LHS == RHS;
    }
  };

  /// Mapping of values in the original VPlan to a combined VPInstruction.
  DenseMap<SmallVector<VPValue *, 4>, VPInstruction *, BundleDenseMapInfo>
      BundleToCombined;

  // Interleaved access info for the VPlan; consulted when evaluating load
  // bundles (see getBest).
  VPInterleavedAccessInfo &IAI;

  /// Basic block to operate on. For now, only instructions in a single BB are
  /// considered.
  const VPBasicBlock &BB;

  /// Indicates whether we managed to combine all visited instructions or not.
  bool CompletelySLP = true;

  /// Width of the widest combined bundle in bits.
  unsigned WidestBundleBits = 0;

  using MultiNodeOpTy =
      typename std::pair<VPInstruction *, SmallVector<VPValue *, 4>>;

  // Input operand bundles for the current multi node. Each multi node operand
  // bundle contains values not matching the multi node's opcode. They will
  // be reordered in reorderMultiNodeOps, once we completed building a
  // multi node.
  SmallVector<MultiNodeOpTy, 4> MultiNodeOps;

  /// Indicates whether we are building a multi node currently.
  bool MultiNodeActive = false;

  /// Check if we can vectorize Operands together.
  bool areVectorizable(ArrayRef<VPValue *> Operands) const;

  /// Add combined instruction \p New for the bundle \p Operands.
  void addCombined(ArrayRef<VPValue *> Operands, VPInstruction *New);

  /// Indicate we hit a bundle we failed to combine. Returns nullptr for now.
  VPInstruction *markFailed();

  /// Reorder operands in the multi node to maximize sequential memory access
  /// and commutative operations.
  SmallVector<MultiNodeOpTy, 4> reorderMultiNodeOps();

  /// Choose the best candidate to use for the lane after \p Last. The set of
  /// candidates to choose from are values with an opcode matching \p Last's
  /// or loads consecutive to \p Last.
  std::pair<OpMode, VPValue *> getBest(OpMode Mode, VPValue *Last,
                                       SmallPtrSetImpl<VPValue *> &Candidates,
                                       VPInterleavedAccessInfo &IAI);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print bundle \p Values to dbgs().
  void dumpBundle(ArrayRef<VPValue *> Values);
#endif

public:
  /// Create an SLP combiner operating on basic block \p BB, using \p IAI for
  /// interleave group queries.
  VPlanSlp(VPInterleavedAccessInfo &IAI, VPBasicBlock &BB) : IAI(IAI), BB(BB) {}

  ~VPlanSlp() = default;

  /// Tries to build an SLP tree rooted at \p Operands and returns a
  /// VPInstruction combining \p Operands, if they can be combined.
  VPInstruction *buildGraph(ArrayRef<VPValue *> Operands);

  /// Return the width of the widest combined bundle in bits.
  unsigned getWidestBundleBits() const { return WidestBundleBits; }

  /// Return true if all visited instruction can be combined.
  bool isCompletelySLP() const { return CompletelySLP; }
};
2936 
// Namespace containing VPlan-related utility functions.
namespace vputils {

/// Returns true if only the first lane of \p Def is used.
bool onlyFirstLaneUsed(VPValue *Def);

} // end namespace vputils
2943 
2944 } // end namespace llvm
2945 
2946 #endif // LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
2947