1 //===- VPlan.h - Represent A Vectorizer Plan --------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file contains the declarations of the Vectorization Plan base classes:
11 /// 1. VPBasicBlock and VPRegionBlock that inherit from a common pure virtual
12 ///    VPBlockBase, together implementing a Hierarchical CFG;
13 /// 2. Specializations of GraphTraits that allow VPBlockBase graphs to be
14 ///    treated as proper graphs for generic algorithms;
15 /// 3. Pure virtual VPRecipeBase serving as the base class for recipes contained
16 ///    within VPBasicBlocks;
17 /// 4. VPInstruction, a concrete Recipe and VPUser modeling a single planned
18 ///    instruction;
19 /// 5. The VPlan class holding a candidate for vectorization;
20 /// 6. The VPlanPrinter class providing a way to print a plan in dot format;
21 /// These are documented in docs/VectorizationPlan.rst.
22 //
23 //===----------------------------------------------------------------------===//
24 
25 #ifndef LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
26 #define LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
27 
28 #include "VPlanLoopInfo.h"
29 #include "VPlanValue.h"
30 #include "llvm/ADT/DenseMap.h"
31 #include "llvm/ADT/DepthFirstIterator.h"
32 #include "llvm/ADT/GraphTraits.h"
33 #include "llvm/ADT/Optional.h"
34 #include "llvm/ADT/SmallBitVector.h"
35 #include "llvm/ADT/SmallPtrSet.h"
36 #include "llvm/ADT/SmallSet.h"
37 #include "llvm/ADT/SmallVector.h"
38 #include "llvm/ADT/Twine.h"
39 #include "llvm/ADT/ilist.h"
40 #include "llvm/ADT/ilist_node.h"
41 #include "llvm/Analysis/VectorUtils.h"
42 #include "llvm/IR/IRBuilder.h"
43 #include "llvm/Support/InstructionCost.h"
44 #include <algorithm>
45 #include <cassert>
46 #include <cstddef>
47 #include <map>
48 #include <string>
49 
50 namespace llvm {
51 
52 class BasicBlock;
53 class DominatorTree;
54 class InnerLoopVectorizer;
55 class LoopInfo;
56 class raw_ostream;
57 class RecurrenceDescriptor;
58 class Value;
59 class VPBasicBlock;
60 class VPRegionBlock;
61 class VPlan;
62 class VPlanSlp;
63 
64 /// Returns a calculation for the total number of elements for a given \p VF.
65 /// For fixed width vectors this value is a constant, whereas for scalable
66 /// vectors it is an expression determined at runtime.
67 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF);
68 
69 /// A range of powers-of-2 vectorization factors with fixed start and
70 /// adjustable end. The range includes start and excludes end, e.g.,:
71 /// [1, 9) = {1, 2, 4, 8}
72 struct VFRange {
73   // A power of 2.
74   const ElementCount Start;
75 
76   // Need not be a power of 2. If End <= Start range is empty.
77   ElementCount End;
78 
79   bool isEmpty() const {
80     return End.getKnownMinValue() <= Start.getKnownMinValue();
81   }
82 
83   VFRange(const ElementCount &Start, const ElementCount &End)
84       : Start(Start), End(End) {
85     assert(Start.isScalable() == End.isScalable() &&
86            "Both Start and End should have the same scalable flag");
87     assert(isPowerOf2_32(Start.getKnownMinValue()) &&
88            "Expected Start to be a power of 2");
89   }
90 };
91 
92 using VPlanPtr = std::unique_ptr<VPlan>;
93 
94 /// In what follows, the term "input IR" refers to code that is fed into the
95 /// vectorizer whereas the term "output IR" refers to code that is generated by
96 /// the vectorizer.
97 
98 /// VPLane provides a way to access lanes in both fixed width and scalable
99 /// vectors, where for the latter the lane index sometimes needs calculating
100 /// as a runtime expression.
101 class VPLane {
102 public:
103   /// Kind describes how to interpret Lane.
104   enum class Kind : uint8_t {
105     /// For First, Lane is the index into the first N elements of a
106     /// fixed-vector <N x <ElTy>> or a scalable vector <vscale x N x <ElTy>>.
107     First,
108     /// For ScalableLast, Lane is the offset from the start of the last
109     /// N-element subvector in a scalable vector <vscale x N x <ElTy>>. For
110     /// example, a Lane of 0 corresponds to lane `(vscale - 1) * N`, a Lane of
111     /// 1 corresponds to `((vscale - 1) * N) + 1`, etc.
112     ScalableLast
113   };
114 
115 private:
116   /// in [0..VF)
117   unsigned Lane;
118 
119   /// Indicates how the Lane should be interpreted, as described above.
120   Kind LaneKind;
121 
122 public:
123   VPLane(unsigned Lane, Kind LaneKind) : Lane(Lane), LaneKind(LaneKind) {}
124 
125   static VPLane getFirstLane() { return VPLane(0, VPLane::Kind::First); }
126 
127   static VPLane getLastLaneForVF(const ElementCount &VF) {
128     unsigned LaneOffset = VF.getKnownMinValue() - 1;
129     Kind LaneKind;
130     if (VF.isScalable())
131       // In this case 'LaneOffset' refers to the offset from the start of the
132       // last subvector with VF.getKnownMinValue() elements.
133       LaneKind = VPLane::Kind::ScalableLast;
134     else
135       LaneKind = VPLane::Kind::First;
136     return VPLane(LaneOffset, LaneKind);
137   }
138 
139   /// Returns a compile-time known value for the lane index and asserts if the
140   /// lane can only be calculated at runtime.
141   unsigned getKnownLane() const {
142     assert(LaneKind == Kind::First);
143     return Lane;
144   }
145 
146   /// Returns an expression describing the lane index that can be used at
147   /// runtime.
148   Value *getAsRuntimeExpr(IRBuilder<> &Builder, const ElementCount &VF) const;
149 
150   /// Returns the Kind of lane offset.
151   Kind getKind() const { return LaneKind; }
152 
153   /// Returns true if this is the first lane of the whole vector.
154   bool isFirstLane() const { return Lane == 0 && LaneKind == Kind::First; }
155 
156   /// Maps the lane to a cache index based on \p VF.
157   unsigned mapToCacheIndex(const ElementCount &VF) const {
158     switch (LaneKind) {
159     case VPLane::Kind::ScalableLast:
160       assert(VF.isScalable() && Lane < VF.getKnownMinValue());
161       return VF.getKnownMinValue() + Lane;
162     default:
163       assert(Lane < VF.getKnownMinValue());
164       return Lane;
165     }
166   }
167 
168   /// Returns the maxmimum number of lanes that we are able to consider
169   /// caching for \p VF.
170   static unsigned getNumCachedLanes(const ElementCount &VF) {
171     return VF.getKnownMinValue() * (VF.isScalable() ? 2 : 1);
172   }
173 };
174 
175 /// VPIteration represents a single point in the iteration space of the output
176 /// (vectorized and/or unrolled) IR loop.
177 struct VPIteration {
178   /// in [0..UF)
179   unsigned Part;
180 
181   VPLane Lane;
182 
183   VPIteration(unsigned Part, unsigned Lane,
184               VPLane::Kind Kind = VPLane::Kind::First)
185       : Part(Part), Lane(Lane, Kind) {}
186 
187   VPIteration(unsigned Part, const VPLane &Lane) : Part(Part), Lane(Lane) {}
188 
189   bool isFirstIteration() const { return Part == 0 && Lane.isFirstLane(); }
190 };
191 
192 /// VPTransformState holds information passed down when "executing" a VPlan,
193 /// needed for generating the output IR.
194 struct VPTransformState {
195   VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI,
196                    DominatorTree *DT, IRBuilder<> &Builder,
197                    InnerLoopVectorizer *ILV, VPlan *Plan)
198       : VF(VF), UF(UF), Instance(), LI(LI), DT(DT), Builder(Builder), ILV(ILV),
199         Plan(Plan) {}
200 
201   /// The chosen Vectorization and Unroll Factors of the loop being vectorized.
202   ElementCount VF;
203   unsigned UF;
204 
205   /// Hold the indices to generate specific scalar instructions. Null indicates
206   /// that all instances are to be generated, using either scalar or vector
207   /// instructions.
208   Optional<VPIteration> Instance;
209 
210   struct DataState {
211     /// A type for vectorized values in the new loop. Each value from the
212     /// original loop, when vectorized, is represented by UF vector values in
213     /// the new unrolled loop, where UF is the unroll factor.
214     typedef SmallVector<Value *, 2> PerPartValuesTy;
215 
216     DenseMap<VPValue *, PerPartValuesTy> PerPartOutput;
217 
218     using ScalarsPerPartValuesTy = SmallVector<SmallVector<Value *, 4>, 2>;
219     DenseMap<VPValue *, ScalarsPerPartValuesTy> PerPartScalars;
220   } Data;
221 
222   /// Get the generated Value for a given VPValue and a given Part. Note that
223   /// as some Defs are still created by ILV and managed in its ValueMap, this
224   /// method will delegate the call to ILV in such cases in order to provide
225   /// callers a consistent API.
226   /// \see set.
227   Value *get(VPValue *Def, unsigned Part);
228 
229   /// Get the generated Value for a given VPValue and given Part and Lane.
230   Value *get(VPValue *Def, const VPIteration &Instance);
231 
232   bool hasVectorValue(VPValue *Def, unsigned Part) {
233     auto I = Data.PerPartOutput.find(Def);
234     return I != Data.PerPartOutput.end() && Part < I->second.size() &&
235            I->second[Part];
236   }
237 
238   bool hasAnyVectorValue(VPValue *Def) const {
239     return Data.PerPartOutput.find(Def) != Data.PerPartOutput.end();
240   }
241 
242   bool hasScalarValue(VPValue *Def, VPIteration Instance) {
243     auto I = Data.PerPartScalars.find(Def);
244     if (I == Data.PerPartScalars.end())
245       return false;
246     unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
247     return Instance.Part < I->second.size() &&
248            CacheIdx < I->second[Instance.Part].size() &&
249            I->second[Instance.Part][CacheIdx];
250   }
251 
252   /// Set the generated Value for a given VPValue and a given Part.
253   void set(VPValue *Def, Value *V, unsigned Part) {
254     if (!Data.PerPartOutput.count(Def)) {
255       DataState::PerPartValuesTy Entry(UF);
256       Data.PerPartOutput[Def] = Entry;
257     }
258     Data.PerPartOutput[Def][Part] = V;
259   }
260   /// Reset an existing vector value for \p Def and a given \p Part.
261   void reset(VPValue *Def, Value *V, unsigned Part) {
262     auto Iter = Data.PerPartOutput.find(Def);
263     assert(Iter != Data.PerPartOutput.end() &&
264            "need to overwrite existing value");
265     Iter->second[Part] = V;
266   }
267 
268   /// Set the generated scalar \p V for \p Def and the given \p Instance.
269   void set(VPValue *Def, Value *V, const VPIteration &Instance) {
270     auto Iter = Data.PerPartScalars.insert({Def, {}});
271     auto &PerPartVec = Iter.first->second;
272     while (PerPartVec.size() <= Instance.Part)
273       PerPartVec.emplace_back();
274     auto &Scalars = PerPartVec[Instance.Part];
275     unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
276     while (Scalars.size() <= CacheIdx)
277       Scalars.push_back(nullptr);
278     assert(!Scalars[CacheIdx] && "should overwrite existing value");
279     Scalars[CacheIdx] = V;
280   }
281 
282   /// Reset an existing scalar value for \p Def and a given \p Instance.
283   void reset(VPValue *Def, Value *V, const VPIteration &Instance) {
284     auto Iter = Data.PerPartScalars.find(Def);
285     assert(Iter != Data.PerPartScalars.end() &&
286            "need to overwrite existing value");
287     assert(Instance.Part < Iter->second.size() &&
288            "need to overwrite existing value");
289     unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
290     assert(CacheIdx < Iter->second[Instance.Part].size() &&
291            "need to overwrite existing value");
292     Iter->second[Instance.Part][CacheIdx] = V;
293   }
294 
295   /// Hold state information used when constructing the CFG of the output IR,
296   /// traversing the VPBasicBlocks and generating corresponding IR BasicBlocks.
297   struct CFGState {
298     /// The previous VPBasicBlock visited. Initially set to null.
299     VPBasicBlock *PrevVPBB = nullptr;
300 
301     /// The previous IR BasicBlock created or used. Initially set to the new
302     /// header BasicBlock.
303     BasicBlock *PrevBB = nullptr;
304 
305     /// The last IR BasicBlock in the output IR. Set to the new latch
306     /// BasicBlock, used for placing the newly created BasicBlocks.
307     BasicBlock *LastBB = nullptr;
308 
309     /// A mapping of each VPBasicBlock to the corresponding BasicBlock. In case
310     /// of replication, maps the BasicBlock of the last replica created.
311     SmallDenseMap<VPBasicBlock *, BasicBlock *> VPBB2IRBB;
312 
313     /// Vector of VPBasicBlocks whose terminator instruction needs to be fixed
314     /// up at the end of vector code generation.
315     SmallVector<VPBasicBlock *, 8> VPBBsToFix;
316 
317     CFGState() = default;
318   } CFG;
319 
320   /// Hold a pointer to LoopInfo to register new basic blocks in the loop.
321   LoopInfo *LI;
322 
323   /// Hold a pointer to Dominator Tree to register new basic blocks in the loop.
324   DominatorTree *DT;
325 
326   /// Hold a reference to the IRBuilder used to generate output IR code.
327   IRBuilder<> &Builder;
328 
329   VPValue2ValueTy VPValue2Value;
330 
331   /// Hold the canonical scalar IV of the vector loop (start=0, step=VF*UF).
332   Value *CanonicalIV = nullptr;
333 
334   /// Hold the trip count of the scalar loop.
335   Value *TripCount = nullptr;
336 
337   /// Hold a pointer to InnerLoopVectorizer to reuse its IR generation methods.
338   InnerLoopVectorizer *ILV;
339 
340   /// Pointer to the VPlan code is generated for.
341   VPlan *Plan;
342 };
343 
344 /// VPUsers instance used by VPBlockBase to manage CondBit and the block
345 /// predicate. Currently VPBlockUsers are used in VPBlockBase for historical
346 /// reasons, but in the future the only VPUsers should either be recipes or
347 /// live-outs.VPBlockBase uses.
348 struct VPBlockUser : public VPUser {
349   VPBlockUser() : VPUser({}, VPUserID::Block) {}
350 
351   VPValue *getSingleOperandOrNull() {
352     if (getNumOperands() == 1)
353       return getOperand(0);
354 
355     return nullptr;
356   }
357   const VPValue *getSingleOperandOrNull() const {
358     if (getNumOperands() == 1)
359       return getOperand(0);
360 
361     return nullptr;
362   }
363 
364   void resetSingleOpUser(VPValue *NewVal) {
365     assert(getNumOperands() <= 1 && "Didn't expect more than one operand!");
366     if (!NewVal) {
367       if (getNumOperands() == 1)
368         removeLastOperand();
369       return;
370     }
371 
372     if (getNumOperands() == 1)
373       setOperand(0, NewVal);
374     else
375       addOperand(NewVal);
376   }
377 };
378 
379 /// VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
380 /// A VPBlockBase can be either a VPBasicBlock or a VPRegionBlock.
381 class VPBlockBase {
382   friend class VPBlockUtils;
383 
384   const unsigned char SubclassID; ///< Subclass identifier (for isa/dyn_cast).
385 
386   /// An optional name for the block.
387   std::string Name;
388 
389   /// The immediate VPRegionBlock which this VPBlockBase belongs to, or null if
390   /// it is a topmost VPBlockBase.
391   VPRegionBlock *Parent = nullptr;
392 
393   /// List of predecessor blocks.
394   SmallVector<VPBlockBase *, 1> Predecessors;
395 
396   /// List of successor blocks.
397   SmallVector<VPBlockBase *, 1> Successors;
398 
399   /// Successor selector managed by a VPUser. For blocks with zero or one
400   /// successors, there is no operand. Otherwise there is exactly one operand
401   /// which is the branch condition.
402   VPBlockUser CondBitUser;
403 
404   /// If the block is predicated, its predicate is stored as an operand of this
405   /// VPUser to maintain the def-use relations. Otherwise there is no operand
406   /// here.
407   VPBlockUser PredicateUser;
408 
409   /// VPlan containing the block. Can only be set on the entry block of the
410   /// plan.
411   VPlan *Plan = nullptr;
412 
413   /// Add \p Successor as the last successor to this block.
414   void appendSuccessor(VPBlockBase *Successor) {
415     assert(Successor && "Cannot add nullptr successor!");
416     Successors.push_back(Successor);
417   }
418 
419   /// Add \p Predecessor as the last predecessor to this block.
420   void appendPredecessor(VPBlockBase *Predecessor) {
421     assert(Predecessor && "Cannot add nullptr predecessor!");
422     Predecessors.push_back(Predecessor);
423   }
424 
425   /// Remove \p Predecessor from the predecessors of this block.
426   void removePredecessor(VPBlockBase *Predecessor) {
427     auto Pos = find(Predecessors, Predecessor);
428     assert(Pos && "Predecessor does not exist");
429     Predecessors.erase(Pos);
430   }
431 
432   /// Remove \p Successor from the successors of this block.
433   void removeSuccessor(VPBlockBase *Successor) {
434     auto Pos = find(Successors, Successor);
435     assert(Pos && "Successor does not exist");
436     Successors.erase(Pos);
437   }
438 
439 protected:
440   VPBlockBase(const unsigned char SC, const std::string &N)
441       : SubclassID(SC), Name(N) {}
442 
443 public:
444   /// An enumeration for keeping track of the concrete subclass of VPBlockBase
445   /// that are actually instantiated. Values of this enumeration are kept in the
446   /// SubclassID field of the VPBlockBase objects. They are used for concrete
447   /// type identification.
448   using VPBlockTy = enum { VPBasicBlockSC, VPRegionBlockSC };
449 
450   using VPBlocksTy = SmallVectorImpl<VPBlockBase *>;
451 
452   virtual ~VPBlockBase() = default;
453 
454   const std::string &getName() const { return Name; }
455 
456   void setName(const Twine &newName) { Name = newName.str(); }
457 
458   /// \return an ID for the concrete type of this object.
459   /// This is used to implement the classof checks. This should not be used
460   /// for any other purpose, as the values may change as LLVM evolves.
461   unsigned getVPBlockID() const { return SubclassID; }
462 
463   VPRegionBlock *getParent() { return Parent; }
464   const VPRegionBlock *getParent() const { return Parent; }
465 
466   /// \return A pointer to the plan containing the current block.
467   VPlan *getPlan();
468   const VPlan *getPlan() const;
469 
470   /// Sets the pointer of the plan containing the block. The block must be the
471   /// entry block into the VPlan.
472   void setPlan(VPlan *ParentPlan);
473 
474   void setParent(VPRegionBlock *P) { Parent = P; }
475 
476   /// \return the VPBasicBlock that is the entry of this VPBlockBase,
477   /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
478   /// VPBlockBase is a VPBasicBlock, it is returned.
479   const VPBasicBlock *getEntryBasicBlock() const;
480   VPBasicBlock *getEntryBasicBlock();
481 
482   /// \return the VPBasicBlock that is the exit of this VPBlockBase,
483   /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
484   /// VPBlockBase is a VPBasicBlock, it is returned.
485   const VPBasicBlock *getExitBasicBlock() const;
486   VPBasicBlock *getExitBasicBlock();
487 
488   const VPBlocksTy &getSuccessors() const { return Successors; }
489   VPBlocksTy &getSuccessors() { return Successors; }
490 
491   const VPBlocksTy &getPredecessors() const { return Predecessors; }
492   VPBlocksTy &getPredecessors() { return Predecessors; }
493 
494   /// \return the successor of this VPBlockBase if it has a single successor.
495   /// Otherwise return a null pointer.
496   VPBlockBase *getSingleSuccessor() const {
497     return (Successors.size() == 1 ? *Successors.begin() : nullptr);
498   }
499 
500   /// \return the predecessor of this VPBlockBase if it has a single
501   /// predecessor. Otherwise return a null pointer.
502   VPBlockBase *getSinglePredecessor() const {
503     return (Predecessors.size() == 1 ? *Predecessors.begin() : nullptr);
504   }
505 
506   size_t getNumSuccessors() const { return Successors.size(); }
507   size_t getNumPredecessors() const { return Predecessors.size(); }
508 
509   /// An Enclosing Block of a block B is any block containing B, including B
510   /// itself. \return the closest enclosing block starting from "this", which
511   /// has successors. \return the root enclosing block if all enclosing blocks
512   /// have no successors.
513   VPBlockBase *getEnclosingBlockWithSuccessors();
514 
515   /// \return the closest enclosing block starting from "this", which has
516   /// predecessors. \return the root enclosing block if all enclosing blocks
517   /// have no predecessors.
518   VPBlockBase *getEnclosingBlockWithPredecessors();
519 
520   /// \return the successors either attached directly to this VPBlockBase or, if
521   /// this VPBlockBase is the exit block of a VPRegionBlock and has no
522   /// successors of its own, search recursively for the first enclosing
523   /// VPRegionBlock that has successors and return them. If no such
524   /// VPRegionBlock exists, return the (empty) successors of the topmost
525   /// VPBlockBase reached.
526   const VPBlocksTy &getHierarchicalSuccessors() {
527     return getEnclosingBlockWithSuccessors()->getSuccessors();
528   }
529 
530   /// \return the hierarchical successor of this VPBlockBase if it has a single
531   /// hierarchical successor. Otherwise return a null pointer.
532   VPBlockBase *getSingleHierarchicalSuccessor() {
533     return getEnclosingBlockWithSuccessors()->getSingleSuccessor();
534   }
535 
536   /// \return the predecessors either attached directly to this VPBlockBase or,
537   /// if this VPBlockBase is the entry block of a VPRegionBlock and has no
538   /// predecessors of its own, search recursively for the first enclosing
539   /// VPRegionBlock that has predecessors and return them. If no such
540   /// VPRegionBlock exists, return the (empty) predecessors of the topmost
541   /// VPBlockBase reached.
542   const VPBlocksTy &getHierarchicalPredecessors() {
543     return getEnclosingBlockWithPredecessors()->getPredecessors();
544   }
545 
546   /// \return the hierarchical predecessor of this VPBlockBase if it has a
547   /// single hierarchical predecessor. Otherwise return a null pointer.
548   VPBlockBase *getSingleHierarchicalPredecessor() {
549     return getEnclosingBlockWithPredecessors()->getSinglePredecessor();
550   }
551 
552   /// \return the condition bit selecting the successor.
553   VPValue *getCondBit();
554   /// \return the condition bit selecting the successor.
555   const VPValue *getCondBit() const;
556   /// Set the condition bit selecting the successor.
557   void setCondBit(VPValue *CV);
558 
559   /// \return the block's predicate.
560   VPValue *getPredicate();
561   /// \return the block's predicate.
562   const VPValue *getPredicate() const;
563   /// Set the block's predicate.
564   void setPredicate(VPValue *Pred);
565 
566   /// Set a given VPBlockBase \p Successor as the single successor of this
567   /// VPBlockBase. This VPBlockBase is not added as predecessor of \p Successor.
568   /// This VPBlockBase must have no successors.
569   void setOneSuccessor(VPBlockBase *Successor) {
570     assert(Successors.empty() && "Setting one successor when others exist.");
571     appendSuccessor(Successor);
572   }
573 
574   /// Set two given VPBlockBases \p IfTrue and \p IfFalse to be the two
575   /// successors of this VPBlockBase. \p Condition is set as the successor
576   /// selector. This VPBlockBase is not added as predecessor of \p IfTrue or \p
577   /// IfFalse. This VPBlockBase must have no successors.
578   void setTwoSuccessors(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
579                         VPValue *Condition) {
580     assert(Successors.empty() && "Setting two successors when others exist.");
581     assert(Condition && "Setting two successors without condition!");
582     setCondBit(Condition);
583     appendSuccessor(IfTrue);
584     appendSuccessor(IfFalse);
585   }
586 
587   /// Set each VPBasicBlock in \p NewPreds as predecessor of this VPBlockBase.
588   /// This VPBlockBase must have no predecessors. This VPBlockBase is not added
589   /// as successor of any VPBasicBlock in \p NewPreds.
590   void setPredecessors(ArrayRef<VPBlockBase *> NewPreds) {
591     assert(Predecessors.empty() && "Block predecessors already set.");
592     for (auto *Pred : NewPreds)
593       appendPredecessor(Pred);
594   }
595 
596   /// Remove all the predecessor of this block.
597   void clearPredecessors() { Predecessors.clear(); }
598 
599   /// Remove all the successors of this block and set to null its condition bit
600   void clearSuccessors() {
601     Successors.clear();
602     setCondBit(nullptr);
603   }
604 
605   /// The method which generates the output IR that correspond to this
606   /// VPBlockBase, thereby "executing" the VPlan.
607   virtual void execute(struct VPTransformState *State) = 0;
608 
609   /// Delete all blocks reachable from a given VPBlockBase, inclusive.
610   static void deleteCFG(VPBlockBase *Entry);
611 
612   /// Return true if it is legal to hoist instructions into this block.
613   bool isLegalToHoistInto() {
614     // There are currently no constraints that prevent an instruction to be
615     // hoisted into a VPBlockBase.
616     return true;
617   }
618 
619   /// Replace all operands of VPUsers in the block with \p NewValue and also
620   /// replaces all uses of VPValues defined in the block with NewValue.
621   virtual void dropAllReferences(VPValue *NewValue) = 0;
622 
623 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
624   void printAsOperand(raw_ostream &OS, bool PrintType) const {
625     OS << getName();
626   }
627 
628   /// Print plain-text dump of this VPBlockBase to \p O, prefixing all lines
629   /// with \p Indent. \p SlotTracker is used to print unnamed VPValue's using
630   /// consequtive numbers.
631   ///
632   /// Note that the numbering is applied to the whole VPlan, so printing
633   /// individual blocks is consistent with the whole VPlan printing.
634   virtual void print(raw_ostream &O, const Twine &Indent,
635                      VPSlotTracker &SlotTracker) const = 0;
636 
637   /// Print plain-text dump of this VPlan to \p O.
638   void print(raw_ostream &O) const {
639     VPSlotTracker SlotTracker(getPlan());
640     print(O, "", SlotTracker);
641   }
642 
643   /// Print the successors of this block to \p O, prefixing all lines with \p
644   /// Indent.
645   void printSuccessors(raw_ostream &O, const Twine &Indent) const;
646 
647   /// Dump this VPBlockBase to dbgs().
648   LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
649 #endif
650 };
651 
/// VPRecipeBase is a base class modeling a sequence of one or more output IR
/// instructions. VPRecipeBase owns the VPValues it defines through VPDef
/// and is responsible for deleting its defined values. Single-value
/// VPRecipeBases that also inherit from VPValue must make sure to inherit from
/// VPRecipeBase before VPValue.
class VPRecipeBase : public ilist_node_with_parent<VPRecipeBase, VPBasicBlock>,
                     public VPDef,
                     public VPUser {
  friend VPBasicBlock;
  friend class VPBlockUtils;

  /// Each VPRecipe belongs to a single VPBasicBlock.
  VPBasicBlock *Parent = nullptr;

public:
  /// Construct a recipe with VPDef subclass id \p SC and the given
  /// \p Operands.
  VPRecipeBase(const unsigned char SC, ArrayRef<VPValue *> Operands)
      : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}

  /// Construct a recipe with VPDef subclass id \p SC from a range of
  /// operands.
  template <typename IterT>
  VPRecipeBase(const unsigned char SC, iterator_range<IterT> Operands)
      : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}
  virtual ~VPRecipeBase() = default;

  /// \return the VPBasicBlock which this VPRecipe belongs to.
  VPBasicBlock *getParent() { return Parent; }
  const VPBasicBlock *getParent() const { return Parent; }

  /// The method which generates the output IR instructions that correspond to
  /// this VPRecipe, thereby "executing" the VPlan.
  virtual void execute(struct VPTransformState &State) = 0;

  /// Insert an unlinked recipe into a basic block immediately before
  /// the specified recipe.
  void insertBefore(VPRecipeBase *InsertPos);

  /// Insert an unlinked Recipe into a basic block immediately after
  /// the specified Recipe.
  void insertAfter(VPRecipeBase *InsertPos);

  /// Unlink this recipe from its current VPBasicBlock and insert it into
  /// the VPBasicBlock that MovePos lives in, right after MovePos.
  void moveAfter(VPRecipeBase *MovePos);

  /// Unlink this recipe and insert into BB before I.
  ///
  /// \pre I is a valid iterator into BB.
  void moveBefore(VPBasicBlock &BB, iplist<VPRecipeBase>::iterator I);

  /// This method unlinks 'this' from the containing basic block, but does not
  /// delete it.
  void removeFromParent();

  /// This method unlinks 'this' from the containing basic block and deletes it.
  ///
  /// \returns an iterator pointing to the element after the erased one
  iplist<VPRecipeBase>::iterator eraseFromParent();

  /// Returns the underlying instruction, if the recipe is a VPValue or nullptr
  /// otherwise.
  Instruction *getUnderlyingInstr() {
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
  }
  const Instruction *getUnderlyingInstr() const {
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    // All VPDefs are also VPRecipeBases.
    return true;
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPUser *U) {
    return U->getVPUserID() == VPUser::VPUserID::Recipe;
  }

  /// Returns true if the recipe may have side-effects.
  bool mayHaveSideEffects() const;

  /// Returns true for PHI-like recipes.
  bool isPhi() const {
    return getVPDefID() == VPWidenIntOrFpInductionSC || getVPDefID() == VPWidenPHISC ||
      getVPDefID() == VPPredInstPHISC || getVPDefID() == VPWidenCanonicalIVSC;
  }

  /// Returns true if the recipe may read from memory.
  bool mayReadFromMemory() const;

  /// Returns true if the recipe may write to memory.
  bool mayWriteToMemory() const;

  /// Returns true if the recipe may read from or write to memory.
  bool mayReadOrWriteMemory() const {
    return mayReadFromMemory() || mayWriteToMemory();
  }
};
748 
749 inline bool VPUser::classof(const VPDef *Def) {
750   return Def->getVPDefID() == VPRecipeBase::VPInstructionSC ||
751          Def->getVPDefID() == VPRecipeBase::VPWidenSC ||
752          Def->getVPDefID() == VPRecipeBase::VPWidenCallSC ||
753          Def->getVPDefID() == VPRecipeBase::VPWidenSelectSC ||
754          Def->getVPDefID() == VPRecipeBase::VPWidenGEPSC ||
755          Def->getVPDefID() == VPRecipeBase::VPBlendSC ||
756          Def->getVPDefID() == VPRecipeBase::VPInterleaveSC ||
757          Def->getVPDefID() == VPRecipeBase::VPReplicateSC ||
758          Def->getVPDefID() == VPRecipeBase::VPReductionSC ||
759          Def->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC ||
760          Def->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
761 }
762 
763 /// This is a concrete Recipe that models a single VPlan-level instruction.
764 /// While as any Recipe it may generate a sequence of IR instructions when
765 /// executed, these instructions would always form a single-def expression as
766 /// the VPInstruction is also a single def-use vertex.
767 class VPInstruction : public VPRecipeBase, public VPValue {
768   friend class VPlanSlp;
769 
770 public:
  /// VPlan opcodes, extending LLVM IR with idiomatic instructions.
772   enum {
773     Not = Instruction::OtherOpsEnd + 1,
774     ICmpULE,
775     SLPLoad,
776     SLPStore,
777     ActiveLaneMask,
778   };
779 
780 private:
781   typedef unsigned char OpcodeTy;
782   OpcodeTy Opcode;
783 
784   /// Utility method serving execute(): generates a single instance of the
785   /// modeled instruction.
786   void generateInstruction(VPTransformState &State, unsigned Part);
787 
788 protected:
789   void setUnderlyingInstr(Instruction *I) { setUnderlyingValue(I); }
790 
791 public:
792   VPInstruction(unsigned Opcode, ArrayRef<VPValue *> Operands)
793       : VPRecipeBase(VPRecipeBase::VPInstructionSC, Operands),
794         VPValue(VPValue::VPVInstructionSC, nullptr, this), Opcode(Opcode) {}
795 
796   VPInstruction(unsigned Opcode, ArrayRef<VPInstruction *> Operands)
797       : VPRecipeBase(VPRecipeBase::VPInstructionSC, {}),
798         VPValue(VPValue::VPVInstructionSC, nullptr, this), Opcode(Opcode) {
799     for (auto *I : Operands)
800       addOperand(I->getVPSingleValue());
801   }
802 
803   VPInstruction(unsigned Opcode, std::initializer_list<VPValue *> Operands)
804       : VPInstruction(Opcode, ArrayRef<VPValue *>(Operands)) {}
805 
806   /// Method to support type inquiry through isa, cast, and dyn_cast.
807   static inline bool classof(const VPValue *V) {
808     return V->getVPValueID() == VPValue::VPVInstructionSC;
809   }
810 
811   VPInstruction *clone() const {
812     SmallVector<VPValue *, 2> Operands(operands());
813     return new VPInstruction(Opcode, Operands);
814   }
815 
816   /// Method to support type inquiry through isa, cast, and dyn_cast.
817   static inline bool classof(const VPDef *R) {
818     return R->getVPDefID() == VPRecipeBase::VPInstructionSC;
819   }
820 
821   unsigned getOpcode() const { return Opcode; }
822 
823   /// Generate the instruction.
824   /// TODO: We currently execute only per-part unless a specific instance is
825   /// provided.
826   void execute(VPTransformState &State) override;
827 
828 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
829   /// Print the VPInstruction to \p O.
830   void print(raw_ostream &O, const Twine &Indent,
831              VPSlotTracker &SlotTracker) const override;
832 
833   /// Print the VPInstruction to dbgs() (for debugging).
834   LLVM_DUMP_METHOD void dump() const;
835 #endif
836 
837   /// Return true if this instruction may modify memory.
838   bool mayWriteToMemory() const {
839     // TODO: we can use attributes of the called function to rule out memory
840     //       modifications.
841     return Opcode == Instruction::Store || Opcode == Instruction::Call ||
842            Opcode == Instruction::Invoke || Opcode == SLPStore;
843   }
844 
845   bool hasResult() const {
846     // CallInst may or may not have a result, depending on the called function.
847     // Conservatively return calls have results for now.
848     switch (getOpcode()) {
849     case Instruction::Ret:
850     case Instruction::Br:
851     case Instruction::Store:
852     case Instruction::Switch:
853     case Instruction::IndirectBr:
854     case Instruction::Resume:
855     case Instruction::CatchRet:
856     case Instruction::Unreachable:
857     case Instruction::Fence:
858     case Instruction::AtomicRMW:
859       return false;
860     default:
861       return true;
862     }
863   }
864 };
865 
/// VPWidenRecipe is a recipe for producing a copy of vector type its
/// ingredient. This recipe covers most of the traditional vectorization cases
/// where each ingredient transforms into a vectorized version of itself.
class VPWidenRecipe : public VPRecipeBase, public VPValue {
public:
  /// Create a widening recipe for ingredient \p I with the given \p Operands.
  template <typename IterT>
  VPWidenRecipe(Instruction &I, iterator_range<IterT> Operands)
      : VPRecipeBase(VPRecipeBase::VPWidenSC, Operands),
        VPValue(VPValue::VPVWidenSC, &I, this) {}

  ~VPWidenRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast, on both
  /// the VPDef and the VPValue side of the recipe.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenSC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVWidenSC;
  }

  /// Produce widened copies of all Ingredients.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
895 
896 /// A recipe for widening Call instructions.
897 class VPWidenCallRecipe : public VPRecipeBase, public VPValue {
898 
899 public:
900   template <typename IterT>
901   VPWidenCallRecipe(CallInst &I, iterator_range<IterT> CallArguments)
902       : VPRecipeBase(VPRecipeBase::VPWidenCallSC, CallArguments),
903         VPValue(VPValue::VPVWidenCallSC, &I, this) {}
904 
905   ~VPWidenCallRecipe() override = default;
906 
907   /// Method to support type inquiry through isa, cast, and dyn_cast.
908   static inline bool classof(const VPDef *D) {
909     return D->getVPDefID() == VPRecipeBase::VPWidenCallSC;
910   }
911 
912   /// Produce a widened version of the call instruction.
913   void execute(VPTransformState &State) override;
914 
915 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
916   /// Print the recipe.
917   void print(raw_ostream &O, const Twine &Indent,
918              VPSlotTracker &SlotTracker) const override;
919 #endif
920 };
921 
922 /// A recipe for widening select instructions.
923 class VPWidenSelectRecipe : public VPRecipeBase, public VPValue {
924 
925   /// Is the condition of the select loop invariant?
926   bool InvariantCond;
927 
928 public:
929   template <typename IterT>
930   VPWidenSelectRecipe(SelectInst &I, iterator_range<IterT> Operands,
931                       bool InvariantCond)
932       : VPRecipeBase(VPRecipeBase::VPWidenSelectSC, Operands),
933         VPValue(VPValue::VPVWidenSelectSC, &I, this),
934         InvariantCond(InvariantCond) {}
935 
936   ~VPWidenSelectRecipe() override = default;
937 
938   /// Method to support type inquiry through isa, cast, and dyn_cast.
939   static inline bool classof(const VPDef *D) {
940     return D->getVPDefID() == VPRecipeBase::VPWidenSelectSC;
941   }
942 
943   /// Produce a widened version of the select instruction.
944   void execute(VPTransformState &State) override;
945 
946 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
947   /// Print the recipe.
948   void print(raw_ostream &O, const Twine &Indent,
949              VPSlotTracker &SlotTracker) const override;
950 #endif
951 };
952 
953 /// A recipe for handling GEP instructions.
954 class VPWidenGEPRecipe : public VPRecipeBase, public VPValue {
955   bool IsPtrLoopInvariant;
956   SmallBitVector IsIndexLoopInvariant;
957 
958 public:
959   template <typename IterT>
960   VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands)
961       : VPRecipeBase(VPRecipeBase::VPWidenGEPSC, Operands),
962         VPValue(VPWidenGEPSC, GEP, this),
963         IsIndexLoopInvariant(GEP->getNumIndices(), false) {}
964 
965   template <typename IterT>
966   VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands,
967                    Loop *OrigLoop)
968       : VPRecipeBase(VPRecipeBase::VPWidenGEPSC, Operands),
969         VPValue(VPValue::VPVWidenGEPSC, GEP, this),
970         IsIndexLoopInvariant(GEP->getNumIndices(), false) {
971     IsPtrLoopInvariant = OrigLoop->isLoopInvariant(GEP->getPointerOperand());
972     for (auto Index : enumerate(GEP->indices()))
973       IsIndexLoopInvariant[Index.index()] =
974           OrigLoop->isLoopInvariant(Index.value().get());
975   }
976   ~VPWidenGEPRecipe() override = default;
977 
978   /// Method to support type inquiry through isa, cast, and dyn_cast.
979   static inline bool classof(const VPDef *D) {
980     return D->getVPDefID() == VPRecipeBase::VPWidenGEPSC;
981   }
982 
983   /// Generate the gep nodes.
984   void execute(VPTransformState &State) override;
985 
986 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
987   /// Print the recipe.
988   void print(raw_ostream &O, const Twine &Indent,
989              VPSlotTracker &SlotTracker) const override;
990 #endif
991 };
992 
/// A recipe for handling phi nodes of integer and floating-point inductions,
/// producing their vector and scalar values.
class VPWidenIntOrFpInductionRecipe : public VPRecipeBase {
  /// The induction phi from the original (scalar) loop.
  PHINode *IV;

public:
  /// Create a recipe for induction phi \p IV with start value \p Start. The
  /// first defined VPValue is for \p Trunc when one is given, and for \p IV
  /// otherwise; if \p Cast is given, a second VPValue is defined for it.
  VPWidenIntOrFpInductionRecipe(PHINode *IV, VPValue *Start, Instruction *Cast,
                                TruncInst *Trunc = nullptr)
      : VPRecipeBase(VPWidenIntOrFpInductionSC, {Start}), IV(IV) {
    if (Trunc)
      new VPValue(Trunc, this);
    else
      new VPValue(IV, this);

    if (Cast)
      new VPValue(Cast, this);
  }
  ~VPWidenIntOrFpInductionRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenIntOrFpInductionSC;
  }

  /// Generate the vectorized and scalarized versions of the phi node as
  /// needed by their users.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the start value of the induction.
  VPValue *getStartValue() { return getOperand(0); }

  /// Returns the cast VPValue, if one is attached, or nullptr otherwise.
  /// The cast, when present, is always the second defined value (see the
  /// constructor).
  VPValue *getCastValue() {
    if (getNumDefinedValues() != 2)
      return nullptr;
    return getVPValue(1);
  }

  /// Returns the first defined value as TruncInst, if it is one or nullptr
  /// otherwise.
  TruncInst *getTruncInst() {
    return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
  }
  const TruncInst *getTruncInst() const {
    return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
  }
};
1046 
/// A recipe for handling all phi nodes except for integer and FP inductions.
/// For reduction PHIs, RdxDesc must point to the corresponding recurrence
/// descriptor, the start value is the first operand of the recipe and the
/// incoming value from the backedge is the second operand. In the VPlan native
/// path, all incoming VPValues & VPBasicBlock pairs are managed in the recipe
/// directly.
class VPWidenPHIRecipe : public VPRecipeBase, public VPValue {
  /// Descriptor for a reduction PHI; nullptr for non-reduction phis.
  RecurrenceDescriptor *RdxDesc = nullptr;

  /// List of incoming blocks. Only used in the VPlan native path.
  SmallVector<VPBasicBlock *, 2> IncomingBlocks;

public:
  /// Create a new VPWidenPHIRecipe for the reduction \p Phi described by \p
  /// RdxDesc.
  VPWidenPHIRecipe(PHINode *Phi, RecurrenceDescriptor &RdxDesc, VPValue &Start)
      : VPWidenPHIRecipe(Phi) {
    this->RdxDesc = &RdxDesc;
    addOperand(&Start);
  }

  /// Create a VPWidenPHIRecipe for \p Phi, with no operands and no
  /// recurrence descriptor.
  VPWidenPHIRecipe(PHINode *Phi)
      : VPRecipeBase(VPWidenPHISC, {}),
        VPValue(VPValue::VPVWidenPHISC, Phi, this) {}
  ~VPWidenPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenPHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVWidenPHISC;
  }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the start value of the phi, if it is a reduction, or nullptr
  /// when the recipe has no operands.
  VPValue *getStartValue() {
    return getNumOperands() == 0 ? nullptr : getOperand(0);
  }

  /// Returns the incoming value from the loop backedge, if it is a reduction.
  VPValue *getBackedgeValue() {
    assert(RdxDesc && "second incoming value is only guaranteed to be backedge "
                      "value for reductions");
    return getOperand(1);
  }

  /// Adds a pair (\p IncomingV, \p IncomingBlock) to the phi. Only used in
  /// the VPlan native path.
  void addIncoming(VPValue *IncomingV, VPBasicBlock *IncomingBlock) {
    addOperand(IncomingV);
    IncomingBlocks.push_back(IncomingBlock);
  }

  /// Returns the \p I th incoming VPValue.
  VPValue *getIncomingValue(unsigned I) { return getOperand(I); }

  /// Returns the \p I th incoming VPBasicBlock.
  VPBasicBlock *getIncomingBlock(unsigned I) { return IncomingBlocks[I]; }

  /// Returns the recurrence descriptor, or nullptr for non-reduction phis.
  RecurrenceDescriptor *getRecurrenceDescriptor() { return RdxDesc; }
};
1118 
/// A recipe for vectorizing a phi-node as a sequence of mask-based select
/// instructions.
class VPBlendRecipe : public VPRecipeBase, public VPValue {
  /// The phi node being blended.
  PHINode *Phi;

public:
  /// The blend operation is a User of the incoming values and of their
  /// respective masks, ordered [I0, M0, I1, M1, ...]. Note that a single value
  /// might be incoming with a full mask for which there is no VPValue.
  VPBlendRecipe(PHINode *Phi, ArrayRef<VPValue *> Operands)
      : VPRecipeBase(VPBlendSC, Operands),
        VPValue(VPValue::VPVBlendSC, Phi, this), Phi(Phi) {
    assert(Operands.size() > 0 &&
           ((Operands.size() == 1) || (Operands.size() % 2 == 0)) &&
           "Expected either a single incoming value or a positive even number "
           "of operands");
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPBlendSC;
  }

  /// Return the number of incoming values, taking into account that a single
  /// incoming value has no mask.
  unsigned getNumIncomingValues() const { return (getNumOperands() + 1) / 2; }

  /// Return incoming value number \p Idx (incoming values occupy the even
  /// operand positions).
  VPValue *getIncomingValue(unsigned Idx) const { return getOperand(Idx * 2); }

  /// Return mask number \p Idx (masks occupy the odd operand positions).
  VPValue *getMask(unsigned Idx) const { return getOperand(Idx * 2 + 1); }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1161 
/// VPInterleaveRecipe is a recipe for transforming an interleave group of load
/// or stores into one wide load/store and shuffles. The first operand of a
/// VPInterleave recipe is the address, followed by the stored values, followed
/// by an optional mask.
class VPInterleaveRecipe : public VPRecipeBase {
  /// The interleave group this recipe materializes.
  const InterleaveGroup<Instruction> *IG;

  /// True iff a mask operand was appended as the last operand.
  bool HasMask = false;

public:
  /// Create a recipe for \p IG accessing \p Addr, optionally storing
  /// \p StoredValues under mask \p Mask (nullptr means an all-one mask).
  VPInterleaveRecipe(const InterleaveGroup<Instruction> *IG, VPValue *Addr,
                     ArrayRef<VPValue *> StoredValues, VPValue *Mask)
      : VPRecipeBase(VPInterleaveSC, {Addr}), IG(IG) {
    // Define a VPValue per group member that produces a result; members of
    // void type (stores) define none.
    for (unsigned i = 0; i < IG->getFactor(); ++i)
      if (Instruction *I = IG->getMember(i)) {
        if (I->getType()->isVoidTy())
          continue;
        new VPValue(I, this);
      }

    for (auto *SV : StoredValues)
      addOperand(SV);
    if (Mask) {
      HasMask = true;
      addOperand(Mask);
    }
  }
  ~VPInterleaveRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPInterleaveSC;
  }

  /// Return the address accessed by this recipe.
  VPValue *getAddr() const {
    return getOperand(0); // Address is the 1st, mandatory operand.
  }

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    // Mask is optional and therefore the last, currently 2nd operand.
    return HasMask ? getOperand(getNumOperands() - 1) : nullptr;
  }

  /// Return the VPValues stored by this interleave group. If it is a load
  /// interleave group, return an empty ArrayRef.
  ArrayRef<VPValue *> getStoredValues() const {
    // The first operand is the address, followed by the stored values, followed
    // by an optional mask.
    return ArrayRef<VPValue *>(op_begin(), getNumOperands())
        .slice(1, getNumOperands() - (HasMask ? 2 : 1));
  }

  /// Generate the wide load or store, and shuffles.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Return the interleave group this recipe was created for.
  const InterleaveGroup<Instruction> *getInterleaveGroup() { return IG; }
};
1228 
/// A recipe to represent inloop reduction operations, performing a reduction on
/// a vector operand into a scalar value, and adding the result to a chain.
/// The Operands are {ChainOp, VecOp, [Condition]}.
class VPReductionRecipe : public VPRecipeBase, public VPValue {
  /// The recurrence descriptor for the reduction in question.
  RecurrenceDescriptor *RdxDesc;
  /// Pointer to the TTI, needed to create the target reduction
  const TargetTransformInfo *TTI;

public:
  /// Create a reduction recipe for \p I described by \p R, reducing \p VecOp
  /// into chain \p ChainOp, optionally under condition \p CondOp (nullptr
  /// means unconditional).
  VPReductionRecipe(RecurrenceDescriptor *R, Instruction *I, VPValue *ChainOp,
                    VPValue *VecOp, VPValue *CondOp,
                    const TargetTransformInfo *TTI)
      : VPRecipeBase(VPRecipeBase::VPReductionSC, {ChainOp, VecOp}),
        VPValue(VPValue::VPVReductionSC, I, this), RdxDesc(R), TTI(TTI) {
    if (CondOp)
      addOperand(CondOp);
  }

  ~VPReductionRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReductionSC;
  }

  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPReductionSC;
  }

  /// Generate the reduction in the loop
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// The VPValue of the scalar Chain being accumulated.
  VPValue *getChainOp() const { return getOperand(0); }
  /// The VPValue of the vector value to be reduced.
  VPValue *getVecOp() const { return getOperand(1); }
  /// The VPValue of the condition for the block, or nullptr if the reduction
  /// is unconditional.
  VPValue *getCondOp() const {
    return getNumOperands() > 2 ? getOperand(2) : nullptr;
  }
};
1277 
/// VPReplicateRecipe replicates a given instruction producing multiple scalar
/// copies of the original scalar type, one per lane, instead of producing a
/// single copy of widened type for all lanes. If the instruction is known to be
/// uniform only one copy, per lane zero, will be generated.
class VPReplicateRecipe : public VPRecipeBase, public VPValue {
  /// Indicator if only a single replica per lane is needed.
  bool IsUniform;

  /// Indicator if the replicas are also predicated.
  bool IsPredicated;

  /// Indicator if the scalar values should also be packed into a vector.
  bool AlsoPack;

public:
  /// Create a replicating recipe for \p I with the given \p Operands.
  template <typename IterT>
  VPReplicateRecipe(Instruction *I, iterator_range<IterT> Operands,
                    bool IsUniform, bool IsPredicated = false)
      : VPRecipeBase(VPReplicateSC, Operands), VPValue(VPVReplicateSC, I, this),
        IsUniform(IsUniform), IsPredicated(IsPredicated) {
    // Retain the previous behavior of predicateInstructions(), where an
    // insert-element of a predicated instruction got hoisted into the
    // predicated basic block iff it was its only user. This is achieved by
    // having predicated instructions also pack their values into a vector by
    // default unless they have a replicated user which uses their scalar value.
    AlsoPack = IsPredicated && !I->use_empty();
  }

  ~VPReplicateRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPReplicateSC;
  }

  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReplicateSC;
  }

  /// Generate replicas of the desired Ingredient. Replicas will be generated
  /// for all parts and lanes unless a specific part and lane are specified in
  /// the \p State.
  void execute(VPTransformState &State) override;

  /// Override the default packing decision made in the constructor.
  void setAlsoPack(bool Pack) { AlsoPack = Pack; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns true if only one replica per lane is generated.
  bool isUniform() const { return IsUniform; }

  /// Returns true if the scalar values are also packed into a vector.
  bool isPacked() const { return AlsoPack; }

  /// Returns true if the replicas are predicated.
  bool isPredicated() const { return IsPredicated; }
};
1336 
/// A recipe for generating conditional branches on the bits of a mask.
class VPBranchOnMaskRecipe : public VPRecipeBase {
public:
  /// Create a branch-on-mask recipe; the recipe therefore has either zero or
  /// one operand.
  VPBranchOnMaskRecipe(VPValue *BlockInMask)
      : VPRecipeBase(VPBranchOnMaskSC, {}) {
    if (BlockInMask) // nullptr means all-one mask.
      addOperand(BlockInMask);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC;
  }

  /// Generate the extraction of the appropriate bit from the block mask and the
  /// conditional branch.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override {
    O << Indent << "BRANCH-ON-MASK ";
    if (VPValue *Mask = getMask())
      Mask->printAsOperand(O, SlotTracker);
    else
      O << " All-One";
  }
#endif

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    assert(getNumOperands() <= 1 && "should have either 0 or 1 operands");
    // Mask is optional.
    return getNumOperands() == 1 ? getOperand(0) : nullptr;
  }
};
1375 
/// VPPredInstPHIRecipe is a recipe for generating the phi nodes needed when
/// control converges back from a Branch-on-Mask. The phi nodes are needed in
/// order to merge values that are set under such a branch and feed their uses.
/// The phi nodes can be scalar or vector depending on the users of the value.
/// This recipe works in concert with VPBranchOnMaskRecipe.
class VPPredInstPHIRecipe : public VPRecipeBase, public VPValue {
public:
  /// Construct a VPPredInstPHIRecipe given \p PredV, the predicated value that
  /// needs phi nodes after merging back from a Branch-on-Mask.
  VPPredInstPHIRecipe(VPValue *PredV)
      : VPRecipeBase(VPPredInstPHISC, PredV),
        VPValue(VPValue::VPVPredInstPHI, nullptr, this) {}
  ~VPPredInstPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPPredInstPHISC;
  }

  /// Generates phi nodes for live-outs as needed to retain SSA form.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1404 
/// A Recipe for widening load/store operations.
/// The recipe uses the following VPValues:
/// - For load: Address, optional mask
/// - For store: Address, stored value, optional mask
/// TODO: We currently execute only per-part unless a specific instance is
/// provided.
class VPWidenMemoryInstructionRecipe : public VPRecipeBase {
  /// The load or store being widened.
  Instruction &Ingredient;

  /// Append \p Mask as the last operand; a nullptr mask means all-one and is
  /// not recorded.
  void setMask(VPValue *Mask) {
    if (!Mask)
      return;
    addOperand(Mask);
  }

  /// A mask is present iff there is one operand beyond the mandatory ones
  /// (address for loads; address and stored value for stores).
  bool isMasked() const {
    return isStore() ? getNumOperands() == 3 : getNumOperands() == 2;
  }

public:
  /// Create a recipe widening \p Load from \p Addr under optional \p Mask.
  VPWidenMemoryInstructionRecipe(LoadInst &Load, VPValue *Addr, VPValue *Mask)
      : VPRecipeBase(VPWidenMemoryInstructionSC, {Addr}), Ingredient(Load) {
    // Loads produce a result, so define a VPValue for it; stores do not.
    new VPValue(VPValue::VPVMemoryInstructionSC, &Load, this);
    setMask(Mask);
  }

  /// Create a recipe widening \p Store of \p StoredValue to \p Addr under
  /// optional \p Mask.
  VPWidenMemoryInstructionRecipe(StoreInst &Store, VPValue *Addr,
                                 VPValue *StoredValue, VPValue *Mask)
      : VPRecipeBase(VPWidenMemoryInstructionSC, {Addr, StoredValue}),
        Ingredient(Store) {
    setMask(Mask);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
  }

  /// Return the address accessed by this recipe.
  VPValue *getAddr() const {
    return getOperand(0); // Address is the 1st, mandatory operand.
  }

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    // Mask is optional and therefore the last operand.
    return isMasked() ? getOperand(getNumOperands() - 1) : nullptr;
  }

  /// Returns true if this recipe is a store.
  bool isStore() const { return isa<StoreInst>(Ingredient); }

  /// Return the value stored by this recipe.
  VPValue *getStoredValue() const {
    assert(isStore() && "Stored value only available for store instructions");
    return getOperand(1); // Stored value is the 2nd, mandatory operand.
  }

  /// Generate the wide load/store.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1473 
/// A Recipe for widening the canonical induction variable of the vector loop.
class VPWidenCanonicalIVRecipe : public VPRecipeBase {
public:
  /// Create the recipe; it defines a single VPValue with no underlying IR
  /// value, as the canonical IV only exists in the vector loop.
  VPWidenCanonicalIVRecipe() : VPRecipeBase(VPWidenCanonicalIVSC, {}) {
    new VPValue(nullptr, this);
  }

  ~VPWidenCanonicalIVRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
  }

  /// Generate a canonical vector induction variable of the vector loop, with
  /// start = {<Part*VF, Part*VF+1, ..., Part*VF+VF-1> for 0 <= Part < UF}, and
  /// step = <VF*UF, VF*UF, ..., VF*UF>.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1499 
/// VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph. It
/// holds a sequence of zero or more VPRecipe's each representing a sequence of
/// output IR instructions. All PHI-like recipes must come before any non-PHI
/// recipes.
1503 class VPBasicBlock : public VPBlockBase {
1504 public:
1505   using RecipeListTy = iplist<VPRecipeBase>;
1506 
1507 private:
1508   /// The VPRecipes held in the order of output instructions to generate.
1509   RecipeListTy Recipes;
1510 
1511 public:
1512   VPBasicBlock(const Twine &Name = "", VPRecipeBase *Recipe = nullptr)
1513       : VPBlockBase(VPBasicBlockSC, Name.str()) {
1514     if (Recipe)
1515       appendRecipe(Recipe);
1516   }
1517 
1518   ~VPBasicBlock() override {
1519     while (!Recipes.empty())
1520       Recipes.pop_back();
1521   }
1522 
1523   /// Instruction iterators...
1524   using iterator = RecipeListTy::iterator;
1525   using const_iterator = RecipeListTy::const_iterator;
1526   using reverse_iterator = RecipeListTy::reverse_iterator;
1527   using const_reverse_iterator = RecipeListTy::const_reverse_iterator;
1528 
1529   //===--------------------------------------------------------------------===//
1530   /// Recipe iterator methods
1531   ///
1532   inline iterator begin() { return Recipes.begin(); }
1533   inline const_iterator begin() const { return Recipes.begin(); }
1534   inline iterator end() { return Recipes.end(); }
1535   inline const_iterator end() const { return Recipes.end(); }
1536 
1537   inline reverse_iterator rbegin() { return Recipes.rbegin(); }
1538   inline const_reverse_iterator rbegin() const { return Recipes.rbegin(); }
1539   inline reverse_iterator rend() { return Recipes.rend(); }
1540   inline const_reverse_iterator rend() const { return Recipes.rend(); }
1541 
1542   inline size_t size() const { return Recipes.size(); }
1543   inline bool empty() const { return Recipes.empty(); }
1544   inline const VPRecipeBase &front() const { return Recipes.front(); }
1545   inline VPRecipeBase &front() { return Recipes.front(); }
1546   inline const VPRecipeBase &back() const { return Recipes.back(); }
1547   inline VPRecipeBase &back() { return Recipes.back(); }
1548 
1549   /// Returns a reference to the list of recipes.
1550   RecipeListTy &getRecipeList() { return Recipes; }
1551 
1552   /// Returns a pointer to a member of the recipe list.
1553   static RecipeListTy VPBasicBlock::*getSublistAccess(VPRecipeBase *) {
1554     return &VPBasicBlock::Recipes;
1555   }
1556 
1557   /// Method to support type inquiry through isa, cast, and dyn_cast.
1558   static inline bool classof(const VPBlockBase *V) {
1559     return V->getVPBlockID() == VPBlockBase::VPBasicBlockSC;
1560   }
1561 
  /// Insert \p Recipe into this VPBasicBlock before \p InsertPt, setting this
  /// block as the recipe's parent. The recipe must not already belong to a
  /// block.
  void insert(VPRecipeBase *Recipe, iterator InsertPt) {
    assert(Recipe && "No recipe to append.");
    assert(!Recipe->Parent && "Recipe already in VPlan");
    Recipe->Parent = this;
    Recipes.insert(InsertPt, Recipe);
  }
1568 
  /// Augment the existing recipes of a VPBasicBlock with an additional
  /// \p Recipe as the last recipe.
  void appendRecipe(VPRecipeBase *Recipe) { insert(Recipe, end()); }

  /// The method which generates the output IR instructions that correspond to
  /// this VPBasicBlock, thereby "executing" the VPlan.
  void execute(struct VPTransformState *State) override;

  /// Return the position of the first non-phi node recipe in the block.
  iterator getFirstNonPhi();

  /// Returns an iterator range over the PHI-like recipes in the block.
  iterator_range<iterator> phis() {
    return make_range(begin(), getFirstNonPhi());
  }

  /// Replace all operand uses within this block's recipes with \p NewValue.
  void dropAllReferences(VPValue *NewValue) override;

  /// Split current block at \p SplitAt by inserting a new block between the
  /// current block and its successors and moving all recipes starting at
  /// SplitAt to the new block. Returns the new block.
  VPBasicBlock *splitAt(iterator SplitAt);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print this VPBasicBlock to \p O, prefixing all lines with \p Indent. \p
  /// SlotTracker is used to print unnamed VPValue's using consecutive numbers.
  ///
  /// Note that the numbering is applied to the whole VPlan, so printing
  /// individual blocks is consistent with the whole VPlan printing.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
  using VPBlockBase::print; // Get the print(raw_ostream &O) version.
#endif
1602 
private:
  /// Create an IR BasicBlock to hold the output instructions generated by this
  /// VPBasicBlock, and return it. Update the CFGState accordingly.
  BasicBlock *createEmptyBasicBlock(VPTransformState::CFGState &CFG);
1607 };
1608 
1609 /// VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks
1610 /// which form a Single-Entry-Single-Exit subgraph of the output IR CFG.
1611 /// A VPRegionBlock may indicate that its contents are to be replicated several
1612 /// times. This is designed to support predicated scalarization, in which a
1613 /// scalar if-then code structure needs to be generated VF * UF times. Having
1614 /// this replication indicator helps to keep a single model for multiple
1615 /// candidate VF's. The actual replication takes place only once the desired VF
1616 /// and UF have been determined.
class VPRegionBlock : public VPBlockBase {
  /// Hold the Single Entry of the SESE region modelled by the VPRegionBlock.
  VPBlockBase *Entry;

  /// Hold the Single Exit of the SESE region modelled by the VPRegionBlock.
  VPBlockBase *Exit;

  /// An indicator whether this region is to generate multiple replicated
  /// instances of output IR corresponding to its VPBlockBases.
  bool IsReplicator;

public:
  /// Construct a region with \p Entry and \p Exit blocks. \p Entry must have
  /// no predecessors and \p Exit no successors; both are re-parented to this
  /// region.
  VPRegionBlock(VPBlockBase *Entry, VPBlockBase *Exit,
                const std::string &Name = "", bool IsReplicator = false)
      : VPBlockBase(VPRegionBlockSC, Name), Entry(Entry), Exit(Exit),
        IsReplicator(IsReplicator) {
    assert(Entry->getPredecessors().empty() && "Entry block has predecessors.");
    assert(Exit->getSuccessors().empty() && "Exit block has successors.");
    Entry->setParent(this);
    Exit->setParent(this);
  }
  /// Construct an empty region; Entry/Exit must be set before use.
  VPRegionBlock(const std::string &Name = "", bool IsReplicator = false)
      : VPBlockBase(VPRegionBlockSC, Name), Entry(nullptr), Exit(nullptr),
        IsReplicator(IsReplicator) {}

  /// Destroy the region's contents: drop all references first so no block is
  /// deleted while still referenced, then delete the nested CFG.
  ~VPRegionBlock() override {
    if (Entry) {
      VPValue DummyValue;
      Entry->dropAllReferences(&DummyValue);
      deleteCFG(Entry);
    }
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPBlockBase *V) {
    return V->getVPBlockID() == VPBlockBase::VPRegionBlockSC;
  }

  const VPBlockBase *getEntry() const { return Entry; }
  VPBlockBase *getEntry() { return Entry; }

  /// Set \p EntryBlock as the entry VPBlockBase of this VPRegionBlock. \p
  /// EntryBlock must have no predecessors.
  void setEntry(VPBlockBase *EntryBlock) {
    assert(EntryBlock->getPredecessors().empty() &&
           "Entry block cannot have predecessors.");
    Entry = EntryBlock;
    EntryBlock->setParent(this);
  }

  // FIXME: DominatorTreeBase is doing 'A->getParent()->front()'. 'front' is a
  // specific interface of llvm::Function, instead of using
  // GraphTraits::getEntryNode. We should add a new template parameter to
  // DominatorTreeBase representing the Graph type.
  VPBlockBase &front() const { return *Entry; }

  const VPBlockBase *getExit() const { return Exit; }
  VPBlockBase *getExit() { return Exit; }

  /// Set \p ExitBlock as the exit VPBlockBase of this VPRegionBlock. \p
  /// ExitBlock must have no successors.
  void setExit(VPBlockBase *ExitBlock) {
    assert(ExitBlock->getSuccessors().empty() &&
           "Exit block cannot have successors.");
    Exit = ExitBlock;
    ExitBlock->setParent(this);
  }

  /// An indicator whether this region is to generate multiple replicated
  /// instances of output IR corresponding to its VPBlockBases.
  bool isReplicator() const { return IsReplicator; }

  /// The method which generates the output IR instructions that correspond to
  /// this VPRegionBlock, thereby "executing" the VPlan.
  void execute(struct VPTransformState *State) override;

  /// Replace all operand uses within the region's blocks with \p NewValue.
  void dropAllReferences(VPValue *NewValue) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print this VPRegionBlock to \p O (recursively), prefixing all lines with
  /// \p Indent. \p SlotTracker is used to print unnamed VPValue's using
  /// consecutive numbers.
  ///
  /// Note that the numbering is applied to the whole VPlan, so printing
  /// individual regions is consistent with the whole VPlan printing.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
  using VPBlockBase::print; // Get the print(raw_ostream &O) version.
#endif
};
1707 
1708 //===----------------------------------------------------------------------===//
1709 // GraphTraits specializations for VPlan Hierarchical Control-Flow Graphs     //
1710 //===----------------------------------------------------------------------===//
1711 
1712 // The following set of template specializations implement GraphTraits to treat
1713 // any VPBlockBase as a node in a graph of VPBlockBases. It's important to note
// that VPBlockBase traits don't recurse into VPRegionBlocks, i.e., if the
1715 // VPBlockBase is a VPRegionBlock, this specialization provides access to its
1716 // successors/predecessors but not to the blocks inside the region.
1717 
template <> struct GraphTraits<VPBlockBase *> {
  using NodeRef = VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;

  // Each block serves as its own entry node.
  static NodeRef getEntryNode(NodeRef N) { return N; }

  // Children are the block's direct successors; regions are not entered.
  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getSuccessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getSuccessors().end();
  }
};
1732 
// Const variant of the VPBlockBase traits above.
template <> struct GraphTraits<const VPBlockBase *> {
  using NodeRef = const VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::const_iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }

  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getSuccessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getSuccessors().end();
  }
};
1747 
1748 // Inverse order specialization for VPBasicBlocks. Predecessors are used instead
1749 // of successors for the inverse traversal.
template <> struct GraphTraits<Inverse<VPBlockBase *>> {
  using NodeRef = VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;

  static NodeRef getEntryNode(Inverse<NodeRef> B) { return B.Graph; }

  // Inverse traversal: children are the block's predecessors.
  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getPredecessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getPredecessors().end();
  }
};
1764 
1765 // The following set of template specializations implement GraphTraits to
1766 // treat VPRegionBlock as a graph and recurse inside its nodes. It's important
1767 // to note that the blocks inside the VPRegionBlock are treated as VPBlockBases
1768 // (i.e., no dyn_cast is performed, VPBlockBases specialization is used), so
1769 // there won't be automatic recursion into other VPBlockBases that turn to be
1770 // VPRegionBlocks.
1771 
template <>
struct GraphTraits<VPRegionBlock *> : public GraphTraits<VPBlockBase *> {
  using GraphRef = VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  // When the region itself is the graph, traversal starts at its entry block.
  static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getEntry());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
1789 
// Const variant of the VPRegionBlock traits above.
template <>
struct GraphTraits<const VPRegionBlock *>
    : public GraphTraits<const VPBlockBase *> {
  using GraphRef = const VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getEntry());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
1808 
template <>
struct GraphTraits<Inverse<VPRegionBlock *>>
    : public GraphTraits<Inverse<VPBlockBase *>> {
  using GraphRef = VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  // Inverse traversal of a region starts at its exit block.
  static NodeRef getEntryNode(Inverse<GraphRef> N) {
    return N.Graph->getExit();
  }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getExit());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
1829 
1830 /// Iterator to traverse all successors of a VPBlockBase node. This includes the
1831 /// entry node of VPRegionBlocks. Exit blocks of a region implicitly have their
1832 /// parent region's successors. This ensures all blocks in a region are visited
1833 /// before any blocks in a successor region when doing a reverse post-order
/// traversal of the graph.
1835 template <typename BlockPtrTy>
1836 class VPAllSuccessorsIterator
1837     : public iterator_facade_base<VPAllSuccessorsIterator<BlockPtrTy>,
1838                                   std::forward_iterator_tag, VPBlockBase> {
1839   BlockPtrTy Block;
1840   /// Index of the current successor. For VPBasicBlock nodes, this simply is the
1841   /// index for the successor array. For VPRegionBlock, SuccessorIdx == 0 is
1842   /// used for the region's entry block, and SuccessorIdx - 1 are the indices
1843   /// for the successor array.
1844   size_t SuccessorIdx;
1845 
1846   static BlockPtrTy getBlockWithSuccs(BlockPtrTy Current) {
1847     while (Current && Current->getNumSuccessors() == 0)
1848       Current = Current->getParent();
1849     return Current;
1850   }
1851 
1852   /// Templated helper to dereference successor \p SuccIdx of \p Block. Used by
1853   /// both the const and non-const operator* implementations.
1854   template <typename T1> static T1 deref(T1 Block, unsigned SuccIdx) {
1855     if (auto *R = dyn_cast<VPRegionBlock>(Block)) {
1856       if (SuccIdx == 0)
1857         return R->getEntry();
1858       SuccIdx--;
1859     }
1860 
1861     // For exit blocks, use the next parent region with successors.
1862     return getBlockWithSuccs(Block)->getSuccessors()[SuccIdx];
1863   }
1864 
1865 public:
1866   VPAllSuccessorsIterator(BlockPtrTy Block, size_t Idx = 0)
1867       : Block(Block), SuccessorIdx(Idx) {}
1868   VPAllSuccessorsIterator(const VPAllSuccessorsIterator &Other)
1869       : Block(Other.Block), SuccessorIdx(Other.SuccessorIdx) {}
1870 
1871   VPAllSuccessorsIterator &operator=(const VPAllSuccessorsIterator &R) {
1872     Block = R.Block;
1873     SuccessorIdx = R.SuccessorIdx;
1874     return *this;
1875   }
1876 
1877   static VPAllSuccessorsIterator end(BlockPtrTy Block) {
1878     BlockPtrTy ParentWithSuccs = getBlockWithSuccs(Block);
1879     unsigned NumSuccessors = ParentWithSuccs
1880                                  ? ParentWithSuccs->getNumSuccessors()
1881                                  : Block->getNumSuccessors();
1882 
1883     if (auto *R = dyn_cast<VPRegionBlock>(Block))
1884       return {R, NumSuccessors + 1};
1885     return {Block, NumSuccessors};
1886   }
1887 
1888   bool operator==(const VPAllSuccessorsIterator &R) const {
1889     return Block == R.Block && SuccessorIdx == R.SuccessorIdx;
1890   }
1891 
1892   const VPBlockBase *operator*() const { return deref(Block, SuccessorIdx); }
1893 
1894   BlockPtrTy operator*() { return deref(Block, SuccessorIdx); }
1895 
1896   VPAllSuccessorsIterator &operator++() {
1897     SuccessorIdx++;
1898     return *this;
1899   }
1900 
1901   VPAllSuccessorsIterator operator++(int X) {
1902     VPAllSuccessorsIterator Orig = *this;
1903     SuccessorIdx++;
1904     return Orig;
1905   }
1906 };
1907 
1908 /// Helper for GraphTraits specialization that traverses through VPRegionBlocks.
template <typename BlockTy> class VPBlockRecursiveTraversalWrapper {
  // Entry block from which the recursive traversal starts.
  BlockTy Entry;

public:
  VPBlockRecursiveTraversalWrapper(BlockTy Entry) : Entry(Entry) {}
  BlockTy getEntry() { return Entry; }
};
1916 
1917 /// GraphTraits specialization to recursively traverse VPBlockBase nodes,
1918 /// including traversing through VPRegionBlocks.  Exit blocks of a region
1919 /// implicitly have their parent region's successors. This ensures all blocks in
1920 /// a region are visited before any blocks in a successor region when doing a
1921 /// reverse post-order traversal of the graph.
template <>
struct GraphTraits<VPBlockRecursiveTraversalWrapper<VPBlockBase *>> {
  using NodeRef = VPBlockBase *;
  // VPAllSuccessorsIterator also yields region entry blocks, which is what
  // makes this traversal recursive.
  using ChildIteratorType = VPAllSuccessorsIterator<VPBlockBase *>;

  static NodeRef
  getEntryNode(VPBlockRecursiveTraversalWrapper<VPBlockBase *> N) {
    return N.getEntry();
  }

  static inline ChildIteratorType child_begin(NodeRef N) {
    return ChildIteratorType(N);
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return ChildIteratorType::end(N);
  }
};
1940 
// Const variant of the recursive-traversal traits above.
template <>
struct GraphTraits<VPBlockRecursiveTraversalWrapper<const VPBlockBase *>> {
  using NodeRef = const VPBlockBase *;
  using ChildIteratorType = VPAllSuccessorsIterator<const VPBlockBase *>;

  static NodeRef
  getEntryNode(VPBlockRecursiveTraversalWrapper<const VPBlockBase *> N) {
    return N.getEntry();
  }

  static inline ChildIteratorType child_begin(NodeRef N) {
    return ChildIteratorType(N);
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return ChildIteratorType::end(N);
  }
};
1959 
/// VPlan models a candidate for vectorization, encoding various decisions taken
1961 /// to produce efficient output IR, including which branches, basic-blocks and
1962 /// output IR instructions to generate, and their cost. VPlan holds a
1963 /// Hierarchical-CFG of VPBasicBlocks and VPRegionBlocks rooted at an Entry
1964 /// VPBlock.
1965 class VPlan {
1966   friend class VPlanPrinter;
1967   friend class VPSlotTracker;
1968 
1969   /// Hold the single entry to the Hierarchical CFG of the VPlan.
1970   VPBlockBase *Entry;
1971 
1972   /// Holds the VFs applicable to this VPlan.
1973   SmallSetVector<ElementCount, 2> VFs;
1974 
1975   /// Holds the name of the VPlan, for printing.
1976   std::string Name;
1977 
1978   /// Holds all the external definitions created for this VPlan.
1979   // TODO: Introduce a specific representation for external definitions in
1980   // VPlan. External definitions must be immutable and hold a pointer to its
1981   // underlying IR that will be used to implement its structural comparison
1982   // (operators '==' and '<').
1983   SetVector<VPValue *> VPExternalDefs;
1984 
1985   /// Represents the backedge taken count of the original loop, for folding
1986   /// the tail.
1987   VPValue *BackedgeTakenCount = nullptr;
1988 
1989   /// Holds a mapping between Values and their corresponding VPValue inside
1990   /// VPlan.
1991   Value2VPValueTy Value2VPValue;
1992 
1993   /// Contains all VPValues that been allocated by addVPValue directly and need
1994   /// to be free when the plan's destructor is called.
1995   SmallVector<VPValue *, 16> VPValuesToFree;
1996 
1997   /// Holds the VPLoopInfo analysis for this VPlan.
1998   VPLoopInfo VPLInfo;
1999 
2000 public:
2001   VPlan(VPBlockBase *Entry = nullptr) : Entry(Entry) {
2002     if (Entry)
2003       Entry->setPlan(this);
2004   }
2005 
2006   ~VPlan() {
2007     if (Entry) {
2008       VPValue DummyValue;
2009       for (VPBlockBase *Block : depth_first(Entry))
2010         Block->dropAllReferences(&DummyValue);
2011 
2012       VPBlockBase::deleteCFG(Entry);
2013     }
2014     for (VPValue *VPV : VPValuesToFree)
2015       delete VPV;
2016     if (BackedgeTakenCount)
2017       delete BackedgeTakenCount;
2018     for (VPValue *Def : VPExternalDefs)
2019       delete Def;
2020   }
2021 
2022   /// Generate the IR code for this VPlan.
2023   void execute(struct VPTransformState *State);
2024 
2025   VPBlockBase *getEntry() { return Entry; }
2026   const VPBlockBase *getEntry() const { return Entry; }
2027 
2028   VPBlockBase *setEntry(VPBlockBase *Block) {
2029     Entry = Block;
2030     Block->setPlan(this);
2031     return Entry;
2032   }
2033 
2034   /// The backedge taken count of the original loop.
2035   VPValue *getOrCreateBackedgeTakenCount() {
2036     if (!BackedgeTakenCount)
2037       BackedgeTakenCount = new VPValue();
2038     return BackedgeTakenCount;
2039   }
2040 
2041   void addVF(ElementCount VF) { VFs.insert(VF); }
2042 
2043   bool hasVF(ElementCount VF) { return VFs.count(VF); }
2044 
2045   const std::string &getName() const { return Name; }
2046 
2047   void setName(const Twine &newName) { Name = newName.str(); }
2048 
2049   /// Add \p VPVal to the pool of external definitions if it's not already
2050   /// in the pool.
2051   void addExternalDef(VPValue *VPVal) { VPExternalDefs.insert(VPVal); }
2052 
2053   void addVPValue(Value *V) {
2054     assert(V && "Trying to add a null Value to VPlan");
2055     assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
2056     VPValue *VPV = new VPValue(V);
2057     Value2VPValue[V] = VPV;
2058     VPValuesToFree.push_back(VPV);
2059   }
2060 
2061   void addVPValue(Value *V, VPValue *VPV) {
2062     assert(V && "Trying to add a null Value to VPlan");
2063     assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
2064     Value2VPValue[V] = VPV;
2065   }
2066 
2067   VPValue *getVPValue(Value *V) {
2068     assert(V && "Trying to get the VPValue of a null Value");
2069     assert(Value2VPValue.count(V) && "Value does not exist in VPlan");
2070     return Value2VPValue[V];
2071   }
2072 
2073   VPValue *getOrAddVPValue(Value *V) {
2074     assert(V && "Trying to get or add the VPValue of a null Value");
2075     if (!Value2VPValue.count(V))
2076       addVPValue(V);
2077     return getVPValue(V);
2078   }
2079 
2080   void removeVPValueFor(Value *V) { Value2VPValue.erase(V); }
2081 
2082   /// Return the VPLoopInfo analysis for this VPlan.
2083   VPLoopInfo &getVPLoopInfo() { return VPLInfo; }
2084   const VPLoopInfo &getVPLoopInfo() const { return VPLInfo; }
2085 
2086 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2087   /// Print this VPlan to \p O.
2088   void print(raw_ostream &O) const;
2089 
2090   /// Print this VPlan in DOT format to \p O.
2091   void printDOT(raw_ostream &O) const;
2092 
2093   /// Dump the plan to stderr (for debugging).
2094   LLVM_DUMP_METHOD void dump() const;
2095 #endif
2096 
2097   /// Returns a range mapping the values the range \p Operands to their
2098   /// corresponding VPValues.
2099   iterator_range<mapped_iterator<Use *, std::function<VPValue *(Value *)>>>
2100   mapToVPValues(User::op_range Operands) {
2101     std::function<VPValue *(Value *)> Fn = [this](Value *Op) {
2102       return getOrAddVPValue(Op);
2103     };
2104     return map_range(Operands, Fn);
2105   }
2106 
2107 private:
2108   /// Add to the given dominator tree the header block and every new basic block
2109   /// that was created between it and the latch block, inclusive.
2110   static void updateDominatorTree(DominatorTree *DT, BasicBlock *LoopLatchBB,
2111                                   BasicBlock *LoopPreHeaderBB,
2112                                   BasicBlock *LoopExitBB);
2113 };
2114 
2115 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2116 /// VPlanPrinter prints a given VPlan to a given output stream. The printing is
2117 /// indented and follows the dot format.
2118 class VPlanPrinter {
2119   raw_ostream &OS;
2120   const VPlan &Plan;
2121   unsigned Depth = 0;
2122   unsigned TabWidth = 2;
2123   std::string Indent;
2124   unsigned BID = 0;
2125   SmallDenseMap<const VPBlockBase *, unsigned> BlockID;
2126 
2127   VPSlotTracker SlotTracker;
2128 
2129   /// Handle indentation.
2130   void bumpIndent(int b) { Indent = std::string((Depth += b) * TabWidth, ' '); }
2131 
2132   /// Print a given \p Block of the Plan.
2133   void dumpBlock(const VPBlockBase *Block);
2134 
2135   /// Print the information related to the CFG edges going out of a given
2136   /// \p Block, followed by printing the successor blocks themselves.
2137   void dumpEdges(const VPBlockBase *Block);
2138 
2139   /// Print a given \p BasicBlock, including its VPRecipes, followed by printing
2140   /// its successor blocks.
2141   void dumpBasicBlock(const VPBasicBlock *BasicBlock);
2142 
2143   /// Print a given \p Region of the Plan.
2144   void dumpRegion(const VPRegionBlock *Region);
2145 
2146   unsigned getOrCreateBID(const VPBlockBase *Block) {
2147     return BlockID.count(Block) ? BlockID[Block] : BlockID[Block] = BID++;
2148   }
2149 
2150   const Twine getOrCreateName(const VPBlockBase *Block);
2151 
2152   const Twine getUID(const VPBlockBase *Block);
2153 
2154   /// Print the information related to a CFG edge between two VPBlockBases.
2155   void drawEdge(const VPBlockBase *From, const VPBlockBase *To, bool Hidden,
2156                 const Twine &Label);
2157 
2158 public:
2159   VPlanPrinter(raw_ostream &O, const VPlan &P)
2160       : OS(O), Plan(P), SlotTracker(&P) {}
2161 
2162   LLVM_DUMP_METHOD void dump();
2163 };
2164 
/// Helper to print an underlying IR Value as part of VPlan output via
/// operator<<.
struct VPlanIngredient {
  const Value *V;

  VPlanIngredient(const Value *V) : V(V) {}

  void print(raw_ostream &O) const;
};
2172 
/// Stream a VPlanIngredient by delegating to its print method.
inline raw_ostream &operator<<(raw_ostream &OS, const VPlanIngredient &I) {
  I.print(OS);
  return OS;
}
2177 
/// Stream a VPlan by delegating to its print method.
inline raw_ostream &operator<<(raw_ostream &OS, const VPlan &Plan) {
  Plan.print(OS);
  return OS;
}
2182 #endif
2183 
2184 //===----------------------------------------------------------------------===//
2185 // VPlan Utilities
2186 //===----------------------------------------------------------------------===//
2187 
2188 /// Class that provides utilities for VPBlockBases in VPlan.
2189 class VPBlockUtils {
2190 public:
2191   VPBlockUtils() = delete;
2192 
2193   /// Insert disconnected VPBlockBase \p NewBlock after \p BlockPtr. Add \p
2194   /// NewBlock as successor of \p BlockPtr and \p BlockPtr as predecessor of \p
2195   /// NewBlock, and propagate \p BlockPtr parent to \p NewBlock. If \p BlockPtr
2196   /// has more than one successor, its conditional bit is propagated to \p
2197   /// NewBlock. \p NewBlock must have neither successors nor predecessors.
2198   static void insertBlockAfter(VPBlockBase *NewBlock, VPBlockBase *BlockPtr) {
2199     assert(NewBlock->getSuccessors().empty() &&
2200            "Can't insert new block with successors.");
2201     // TODO: move successors from BlockPtr to NewBlock when this functionality
2202     // is necessary. For now, setBlockSingleSuccessor will assert if BlockPtr
2203     // already has successors.
2204     BlockPtr->setOneSuccessor(NewBlock);
2205     NewBlock->setPredecessors({BlockPtr});
2206     NewBlock->setParent(BlockPtr->getParent());
2207   }
2208 
2209   /// Insert disconnected VPBlockBases \p IfTrue and \p IfFalse after \p
2210   /// BlockPtr. Add \p IfTrue and \p IfFalse as succesors of \p BlockPtr and \p
2211   /// BlockPtr as predecessor of \p IfTrue and \p IfFalse. Propagate \p BlockPtr
2212   /// parent to \p IfTrue and \p IfFalse. \p Condition is set as the successor
2213   /// selector. \p BlockPtr must have no successors and \p IfTrue and \p IfFalse
2214   /// must have neither successors nor predecessors.
2215   static void insertTwoBlocksAfter(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
2216                                    VPValue *Condition, VPBlockBase *BlockPtr) {
2217     assert(IfTrue->getSuccessors().empty() &&
2218            "Can't insert IfTrue with successors.");
2219     assert(IfFalse->getSuccessors().empty() &&
2220            "Can't insert IfFalse with successors.");
2221     BlockPtr->setTwoSuccessors(IfTrue, IfFalse, Condition);
2222     IfTrue->setPredecessors({BlockPtr});
2223     IfFalse->setPredecessors({BlockPtr});
2224     IfTrue->setParent(BlockPtr->getParent());
2225     IfFalse->setParent(BlockPtr->getParent());
2226   }
2227 
2228   /// Connect VPBlockBases \p From and \p To bi-directionally. Append \p To to
2229   /// the successors of \p From and \p From to the predecessors of \p To. Both
2230   /// VPBlockBases must have the same parent, which can be null. Both
2231   /// VPBlockBases can be already connected to other VPBlockBases.
2232   static void connectBlocks(VPBlockBase *From, VPBlockBase *To) {
2233     assert((From->getParent() == To->getParent()) &&
2234            "Can't connect two block with different parents");
2235     assert(From->getNumSuccessors() < 2 &&
2236            "Blocks can't have more than two successors.");
2237     From->appendSuccessor(To);
2238     To->appendPredecessor(From);
2239   }
2240 
2241   /// Disconnect VPBlockBases \p From and \p To bi-directionally. Remove \p To
2242   /// from the successors of \p From and \p From from the predecessors of \p To.
2243   static void disconnectBlocks(VPBlockBase *From, VPBlockBase *To) {
2244     assert(To && "Successor to disconnect is null.");
2245     From->removeSuccessor(To);
2246     To->removePredecessor(From);
2247   }
2248 
2249   /// Returns true if the edge \p FromBlock -> \p ToBlock is a back-edge.
2250   static bool isBackEdge(const VPBlockBase *FromBlock,
2251                          const VPBlockBase *ToBlock, const VPLoopInfo *VPLI) {
2252     assert(FromBlock->getParent() == ToBlock->getParent() &&
2253            FromBlock->getParent() && "Must be in same region");
2254     const VPLoop *FromLoop = VPLI->getLoopFor(FromBlock);
2255     const VPLoop *ToLoop = VPLI->getLoopFor(ToBlock);
2256     if (!FromLoop || !ToLoop || FromLoop != ToLoop)
2257       return false;
2258 
2259     // A back-edge is a branch from the loop latch to its header.
2260     return ToLoop->isLoopLatch(FromBlock) && ToBlock == ToLoop->getHeader();
2261   }
2262 
2263   /// Returns true if \p Block is a loop latch
2264   static bool blockIsLoopLatch(const VPBlockBase *Block,
2265                                const VPLoopInfo *VPLInfo) {
2266     if (const VPLoop *ParentVPL = VPLInfo->getLoopFor(Block))
2267       return ParentVPL->isLoopLatch(Block);
2268 
2269     return false;
2270   }
2271 
2272   /// Count and return the number of succesors of \p PredBlock excluding any
2273   /// backedges.
2274   static unsigned countSuccessorsNoBE(VPBlockBase *PredBlock,
2275                                       VPLoopInfo *VPLI) {
2276     unsigned Count = 0;
2277     for (VPBlockBase *SuccBlock : PredBlock->getSuccessors()) {
2278       if (!VPBlockUtils::isBackEdge(PredBlock, SuccBlock, VPLI))
2279         Count++;
2280     }
2281     return Count;
2282   }
2283 
2284   /// Return an iterator range over \p Range which only includes \p BlockTy
2285   /// blocks. The accesses are casted to \p BlockTy.
2286   template <typename BlockTy, typename T>
2287   static auto blocksOnly(const T &Range) {
2288     // Create BaseTy with correct const-ness based on BlockTy.
2289     using BaseTy =
2290         typename std::conditional<std::is_const<BlockTy>::value,
2291                                   const VPBlockBase, VPBlockBase>::type;
2292 
2293     // We need to first create an iterator range over (const) BlocktTy & instead
2294     // of (const) BlockTy * for filter_range to work properly.
2295     auto Mapped =
2296         map_range(Range, [](BaseTy *Block) -> BaseTy & { return *Block; });
2297     auto Filter = make_filter_range(
2298         Mapped, [](BaseTy &Block) { return isa<BlockTy>(&Block); });
2299     return map_range(Filter, [](BaseTy &Block) -> BlockTy * {
2300       return cast<BlockTy>(&Block);
2301     });
2302   }
2303 };
2304 
class VPInterleavedAccessInfo {
  /// Maps each VPInstruction to the interleave group it belongs to, if any.
  DenseMap<VPInstruction *, InterleaveGroup<VPInstruction> *>
      InterleaveGroupMap;

  /// Type for mapping of instruction based interleave groups to VPInstruction
  /// interleave groups
  using Old2NewTy = DenseMap<InterleaveGroup<Instruction> *,
                             InterleaveGroup<VPInstruction> *>;

  /// Recursively traverse \p Region and populate VPlan based interleave groups
  /// based on \p IAI.
  void visitRegion(VPRegionBlock *Region, Old2NewTy &Old2New,
                   InterleavedAccessInfo &IAI);
  /// Recursively traverse \p Block and populate VPlan based interleave groups
  /// based on \p IAI.
  void visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
                  InterleavedAccessInfo &IAI);

public:
  VPInterleavedAccessInfo(VPlan &Plan, InterleavedAccessInfo &IAI);

  /// Delete the owned interleave groups. Several map entries may point at the
  /// same group, so deduplicate through a set first.
  ~VPInterleavedAccessInfo() {
    SmallPtrSet<InterleaveGroup<VPInstruction> *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if doesn't have such group.
  InterleaveGroup<VPInstruction> *
  getInterleaveGroup(VPInstruction *Instr) const {
    return InterleaveGroupMap.lookup(Instr);
  }
};
2343 
/// Class that maps (parts of) an existing VPlan to trees of combined
/// VPInstructions.
class VPlanSlp {
  /// Classifies a bundle while searching for the next lane: combining failed,
  /// the bundle is made of consecutive loads, or of a common non-load opcode.
  enum class OpMode { Failed, Load, Opcode };

  /// A DenseMapInfo implementation for using SmallVector<VPValue *, 4> as
  /// DenseMap keys.
  struct BundleDenseMapInfo {
    // Sentinel "empty" key: a single all-ones pointer that cannot collide
    // with a real bundle of VPValue pointers.
    static SmallVector<VPValue *, 4> getEmptyKey() {
      return {reinterpret_cast<VPValue *>(-1)};
    }

    // Sentinel "tombstone" key, distinct from the empty key.
    static SmallVector<VPValue *, 4> getTombstoneKey() {
      return {reinterpret_cast<VPValue *>(-2)};
    }

    // Hash all pointers in the bundle; order matters, matching isEqual below.
    static unsigned getHashValue(const SmallVector<VPValue *, 4> &V) {
      return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
    }

    static bool isEqual(const SmallVector<VPValue *, 4> &LHS,
                        const SmallVector<VPValue *, 4> &RHS) {
      return LHS == RHS;
    }
  };

  /// Mapping of values in the original VPlan to a combined VPInstruction.
  DenseMap<SmallVector<VPValue *, 4>, VPInstruction *, BundleDenseMapInfo>
      BundleToCombined;

  /// Interleave information used when scoring load bundles. Not owned.
  VPInterleavedAccessInfo &IAI;

  /// Basic block to operate on. For now, only instructions in a single BB are
  /// considered.
  const VPBasicBlock &BB;

  /// Indicates whether we managed to combine all visited instructions or not.
  bool CompletelySLP = true;

  /// Width of the widest combined bundle in bits.
  unsigned WidestBundleBits = 0;

  /// A multi-node operand: the combined instruction plus the (reorderable)
  /// bundle of operand values feeding one of its lanes.
  using MultiNodeOpTy =
      typename std::pair<VPInstruction *, SmallVector<VPValue *, 4>>;

  // Input operand bundles for the current multi node. Each multi node operand
  // bundle contains values not matching the multi node's opcode. They will
  // be reordered in reorderMultiNodeOps, once we completed building a
  // multi node.
  SmallVector<MultiNodeOpTy, 4> MultiNodeOps;

  /// Indicates whether we are building a multi node currently.
  bool MultiNodeActive = false;

  /// Check if we can vectorize Operands together.
  bool areVectorizable(ArrayRef<VPValue *> Operands) const;

  /// Add combined instruction \p New for the bundle \p Operands.
  void addCombined(ArrayRef<VPValue *> Operands, VPInstruction *New);

  /// Indicate we hit a bundle we failed to combine. Returns nullptr for now.
  VPInstruction *markFailed();

  /// Reorder operands in the multi node to maximize sequential memory access
  /// and commutative operations.
  SmallVector<MultiNodeOpTy, 4> reorderMultiNodeOps();

  /// Choose the best candidate to use for the lane after \p Last. The set of
  /// candidates to choose from are values with an opcode matching \p Last's
  /// or loads consecutive to \p Last.
  std::pair<OpMode, VPValue *> getBest(OpMode Mode, VPValue *Last,
                                       SmallPtrSetImpl<VPValue *> &Candidates,
                                       VPInterleavedAccessInfo &IAI);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print bundle \p Values to dbgs().
  void dumpBundle(ArrayRef<VPValue *> Values);
#endif

public:
  /// Create an SLP builder operating on the single basic block \p BB, using
  /// \p IAI for interleaved-access queries. Neither argument is owned.
  VPlanSlp(VPInterleavedAccessInfo &IAI, VPBasicBlock &BB) : IAI(IAI), BB(BB) {}

  ~VPlanSlp() = default;

  /// Tries to build an SLP tree rooted at \p Operands and returns a
  /// VPInstruction combining \p Operands, if they can be combined.
  VPInstruction *buildGraph(ArrayRef<VPValue *> Operands);

  /// Return the width of the widest combined bundle in bits.
  unsigned getWidestBundleBits() const { return WidestBundleBits; }

  /// Return true if all visited instructions can be combined.
  bool isCompletelySLP() const { return CompletelySLP; }
};
2438 } // end namespace llvm
2439 
2440 #endif // LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
2441