1 //===- VPlan.h - Represent A Vectorizer Plan --------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file contains the declarations of the Vectorization Plan base classes:
11 /// 1. VPBasicBlock and VPRegionBlock that inherit from a common pure virtual
12 /// VPBlockBase, together implementing a Hierarchical CFG;
13 /// 2. Specializations of GraphTraits that allow VPBlockBase graphs to be
14 /// treated as proper graphs for generic algorithms;
15 /// 3. Pure virtual VPRecipeBase serving as the base class for recipes contained
16 /// within VPBasicBlocks;
17 /// 4. VPInstruction, a concrete Recipe and VPUser modeling a single planned
18 /// instruction;
19 /// 5. The VPlan class holding a candidate for vectorization;
20 /// 6. The VPlanPrinter class providing a way to print a plan in dot format;
21 /// These are documented in docs/VectorizationPlan.rst.
22 //
23 //===----------------------------------------------------------------------===//
24
25 #ifndef LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
26 #define LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
27
28 #include "VPlanValue.h"
29 #include "llvm/ADT/DenseMap.h"
30 #include "llvm/ADT/DepthFirstIterator.h"
31 #include "llvm/ADT/GraphTraits.h"
32 #include "llvm/ADT/MapVector.h"
33 #include "llvm/ADT/Optional.h"
34 #include "llvm/ADT/SmallBitVector.h"
35 #include "llvm/ADT/SmallPtrSet.h"
36 #include "llvm/ADT/SmallVector.h"
37 #include "llvm/ADT/Twine.h"
38 #include "llvm/ADT/ilist.h"
39 #include "llvm/ADT/ilist_node.h"
40 #include "llvm/Analysis/LoopInfo.h"
41 #include "llvm/Analysis/VectorUtils.h"
42 #include "llvm/IR/DebugLoc.h"
43 #include "llvm/IR/FMF.h"
44 #include "llvm/Transforms/Utils/LoopVersioning.h"
45 #include <algorithm>
46 #include <cassert>
47 #include <cstddef>
48 #include <string>
49
50 namespace llvm {
51
52 class BasicBlock;
53 class DominatorTree;
54 class InductionDescriptor;
55 class InnerLoopVectorizer;
56 class IRBuilderBase;
57 class LoopInfo;
58 class raw_ostream;
59 class RecurrenceDescriptor;
60 class Value;
61 class VPBasicBlock;
62 class VPRegionBlock;
63 class VPlan;
64 class VPReplicateRecipe;
65 class VPlanSlp;
66
67 /// Returns a calculation for the total number of elements for a given \p VF.
68 /// For fixed width vectors this value is a constant, whereas for scalable
69 /// vectors it is an expression determined at runtime.
70 Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF);
71
72 /// Return a value for Step multiplied by VF.
73 Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
74 int64_t Step);
75
76 /// A range of powers-of-2 vectorization factors with fixed start and
77 /// adjustable end. The range includes start and excludes end, e.g.,:
78 /// [1, 9) = {1, 2, 4, 8}
79 struct VFRange {
80 // A power of 2.
81 const ElementCount Start;
82
83 // Need not be a power of 2. If End <= Start range is empty.
84 ElementCount End;
85
isEmptyVFRange86 bool isEmpty() const {
87 return End.getKnownMinValue() <= Start.getKnownMinValue();
88 }
89
VFRangeVFRange90 VFRange(const ElementCount &Start, const ElementCount &End)
91 : Start(Start), End(End) {
92 assert(Start.isScalable() == End.isScalable() &&
93 "Both Start and End should have the same scalable flag");
94 assert(isPowerOf2_32(Start.getKnownMinValue()) &&
95 "Expected Start to be a power of 2");
96 }
97 };
98
99 using VPlanPtr = std::unique_ptr<VPlan>;
100
101 /// In what follows, the term "input IR" refers to code that is fed into the
102 /// vectorizer whereas the term "output IR" refers to code that is generated by
103 /// the vectorizer.
104
105 /// VPLane provides a way to access lanes in both fixed width and scalable
106 /// vectors, where for the latter the lane index sometimes needs calculating
107 /// as a runtime expression.
108 class VPLane {
109 public:
110 /// Kind describes how to interpret Lane.
111 enum class Kind : uint8_t {
112 /// For First, Lane is the index into the first N elements of a
113 /// fixed-vector <N x <ElTy>> or a scalable vector <vscale x N x <ElTy>>.
114 First,
115 /// For ScalableLast, Lane is the offset from the start of the last
116 /// N-element subvector in a scalable vector <vscale x N x <ElTy>>. For
117 /// example, a Lane of 0 corresponds to lane `(vscale - 1) * N`, a Lane of
118 /// 1 corresponds to `((vscale - 1) * N) + 1`, etc.
119 ScalableLast
120 };
121
122 private:
123 /// in [0..VF)
124 unsigned Lane;
125
126 /// Indicates how the Lane should be interpreted, as described above.
127 Kind LaneKind;
128
129 public:
VPLane(unsigned Lane,Kind LaneKind)130 VPLane(unsigned Lane, Kind LaneKind) : Lane(Lane), LaneKind(LaneKind) {}
131
getFirstLane()132 static VPLane getFirstLane() { return VPLane(0, VPLane::Kind::First); }
133
getLastLaneForVF(const ElementCount & VF)134 static VPLane getLastLaneForVF(const ElementCount &VF) {
135 unsigned LaneOffset = VF.getKnownMinValue() - 1;
136 Kind LaneKind;
137 if (VF.isScalable())
138 // In this case 'LaneOffset' refers to the offset from the start of the
139 // last subvector with VF.getKnownMinValue() elements.
140 LaneKind = VPLane::Kind::ScalableLast;
141 else
142 LaneKind = VPLane::Kind::First;
143 return VPLane(LaneOffset, LaneKind);
144 }
145
146 /// Returns a compile-time known value for the lane index and asserts if the
147 /// lane can only be calculated at runtime.
getKnownLane()148 unsigned getKnownLane() const {
149 assert(LaneKind == Kind::First);
150 return Lane;
151 }
152
153 /// Returns an expression describing the lane index that can be used at
154 /// runtime.
155 Value *getAsRuntimeExpr(IRBuilderBase &Builder, const ElementCount &VF) const;
156
157 /// Returns the Kind of lane offset.
getKind()158 Kind getKind() const { return LaneKind; }
159
160 /// Returns true if this is the first lane of the whole vector.
isFirstLane()161 bool isFirstLane() const { return Lane == 0 && LaneKind == Kind::First; }
162
163 /// Maps the lane to a cache index based on \p VF.
mapToCacheIndex(const ElementCount & VF)164 unsigned mapToCacheIndex(const ElementCount &VF) const {
165 switch (LaneKind) {
166 case VPLane::Kind::ScalableLast:
167 assert(VF.isScalable() && Lane < VF.getKnownMinValue());
168 return VF.getKnownMinValue() + Lane;
169 default:
170 assert(Lane < VF.getKnownMinValue());
171 return Lane;
172 }
173 }
174
175 /// Returns the maxmimum number of lanes that we are able to consider
176 /// caching for \p VF.
getNumCachedLanes(const ElementCount & VF)177 static unsigned getNumCachedLanes(const ElementCount &VF) {
178 return VF.getKnownMinValue() * (VF.isScalable() ? 2 : 1);
179 }
180 };
181
182 /// VPIteration represents a single point in the iteration space of the output
183 /// (vectorized and/or unrolled) IR loop.
184 struct VPIteration {
185 /// in [0..UF)
186 unsigned Part;
187
188 VPLane Lane;
189
190 VPIteration(unsigned Part, unsigned Lane,
191 VPLane::Kind Kind = VPLane::Kind::First)
PartVPIteration192 : Part(Part), Lane(Lane, Kind) {}
193
VPIterationVPIteration194 VPIteration(unsigned Part, const VPLane &Lane) : Part(Part), Lane(Lane) {}
195
isFirstIterationVPIteration196 bool isFirstIteration() const { return Part == 0 && Lane.isFirstLane(); }
197 };
198
199 /// VPTransformState holds information passed down when "executing" a VPlan,
200 /// needed for generating the output IR.
201 struct VPTransformState {
VPTransformStateVPTransformState202 VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI,
203 DominatorTree *DT, IRBuilderBase &Builder,
204 InnerLoopVectorizer *ILV, VPlan *Plan)
205 : VF(VF), UF(UF), LI(LI), DT(DT), Builder(Builder), ILV(ILV), Plan(Plan),
206 LVer(nullptr) {}
207
208 /// The chosen Vectorization and Unroll Factors of the loop being vectorized.
209 ElementCount VF;
210 unsigned UF;
211
212 /// Hold the indices to generate specific scalar instructions. Null indicates
213 /// that all instances are to be generated, using either scalar or vector
214 /// instructions.
215 Optional<VPIteration> Instance;
216
217 struct DataState {
218 /// A type for vectorized values in the new loop. Each value from the
219 /// original loop, when vectorized, is represented by UF vector values in
220 /// the new unrolled loop, where UF is the unroll factor.
221 typedef SmallVector<Value *, 2> PerPartValuesTy;
222
223 DenseMap<VPValue *, PerPartValuesTy> PerPartOutput;
224
225 using ScalarsPerPartValuesTy = SmallVector<SmallVector<Value *, 4>, 2>;
226 DenseMap<VPValue *, ScalarsPerPartValuesTy> PerPartScalars;
227 } Data;
228
229 /// Get the generated Value for a given VPValue and a given Part. Note that
230 /// as some Defs are still created by ILV and managed in its ValueMap, this
231 /// method will delegate the call to ILV in such cases in order to provide
232 /// callers a consistent API.
233 /// \see set.
234 Value *get(VPValue *Def, unsigned Part);
235
236 /// Get the generated Value for a given VPValue and given Part and Lane.
237 Value *get(VPValue *Def, const VPIteration &Instance);
238
hasVectorValueVPTransformState239 bool hasVectorValue(VPValue *Def, unsigned Part) {
240 auto I = Data.PerPartOutput.find(Def);
241 return I != Data.PerPartOutput.end() && Part < I->second.size() &&
242 I->second[Part];
243 }
244
hasAnyVectorValueVPTransformState245 bool hasAnyVectorValue(VPValue *Def) const {
246 return Data.PerPartOutput.find(Def) != Data.PerPartOutput.end();
247 }
248
hasScalarValueVPTransformState249 bool hasScalarValue(VPValue *Def, VPIteration Instance) {
250 auto I = Data.PerPartScalars.find(Def);
251 if (I == Data.PerPartScalars.end())
252 return false;
253 unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
254 return Instance.Part < I->second.size() &&
255 CacheIdx < I->second[Instance.Part].size() &&
256 I->second[Instance.Part][CacheIdx];
257 }
258
259 /// Set the generated Value for a given VPValue and a given Part.
setVPTransformState260 void set(VPValue *Def, Value *V, unsigned Part) {
261 if (!Data.PerPartOutput.count(Def)) {
262 DataState::PerPartValuesTy Entry(UF);
263 Data.PerPartOutput[Def] = Entry;
264 }
265 Data.PerPartOutput[Def][Part] = V;
266 }
267 /// Reset an existing vector value for \p Def and a given \p Part.
resetVPTransformState268 void reset(VPValue *Def, Value *V, unsigned Part) {
269 auto Iter = Data.PerPartOutput.find(Def);
270 assert(Iter != Data.PerPartOutput.end() &&
271 "need to overwrite existing value");
272 Iter->second[Part] = V;
273 }
274
275 /// Set the generated scalar \p V for \p Def and the given \p Instance.
setVPTransformState276 void set(VPValue *Def, Value *V, const VPIteration &Instance) {
277 auto Iter = Data.PerPartScalars.insert({Def, {}});
278 auto &PerPartVec = Iter.first->second;
279 while (PerPartVec.size() <= Instance.Part)
280 PerPartVec.emplace_back();
281 auto &Scalars = PerPartVec[Instance.Part];
282 unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
283 while (Scalars.size() <= CacheIdx)
284 Scalars.push_back(nullptr);
285 assert(!Scalars[CacheIdx] && "should overwrite existing value");
286 Scalars[CacheIdx] = V;
287 }
288
289 /// Reset an existing scalar value for \p Def and a given \p Instance.
resetVPTransformState290 void reset(VPValue *Def, Value *V, const VPIteration &Instance) {
291 auto Iter = Data.PerPartScalars.find(Def);
292 assert(Iter != Data.PerPartScalars.end() &&
293 "need to overwrite existing value");
294 assert(Instance.Part < Iter->second.size() &&
295 "need to overwrite existing value");
296 unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
297 assert(CacheIdx < Iter->second[Instance.Part].size() &&
298 "need to overwrite existing value");
299 Iter->second[Instance.Part][CacheIdx] = V;
300 }
301
302 /// Add additional metadata to \p To that was not present on \p Orig.
303 ///
304 /// Currently this is used to add the noalias annotations based on the
305 /// inserted memchecks. Use this for instructions that are *cloned* into the
306 /// vector loop.
307 void addNewMetadata(Instruction *To, const Instruction *Orig);
308
309 /// Add metadata from one instruction to another.
310 ///
311 /// This includes both the original MDs from \p From and additional ones (\see
312 /// addNewMetadata). Use this for *newly created* instructions in the vector
313 /// loop.
314 void addMetadata(Instruction *To, Instruction *From);
315
316 /// Similar to the previous function but it adds the metadata to a
317 /// vector of instructions.
318 void addMetadata(ArrayRef<Value *> To, Instruction *From);
319
320 /// Set the debug location in the builder using the debug location in \p V.
321 void setDebugLocFromInst(const Value *V);
322
323 /// Hold state information used when constructing the CFG of the output IR,
324 /// traversing the VPBasicBlocks and generating corresponding IR BasicBlocks.
325 struct CFGState {
326 /// The previous VPBasicBlock visited. Initially set to null.
327 VPBasicBlock *PrevVPBB = nullptr;
328
329 /// The previous IR BasicBlock created or used. Initially set to the new
330 /// header BasicBlock.
331 BasicBlock *PrevBB = nullptr;
332
333 /// The last IR BasicBlock in the output IR. Set to the exit block of the
334 /// vector loop.
335 BasicBlock *ExitBB = nullptr;
336
337 /// A mapping of each VPBasicBlock to the corresponding BasicBlock. In case
338 /// of replication, maps the BasicBlock of the last replica created.
339 SmallDenseMap<VPBasicBlock *, BasicBlock *> VPBB2IRBB;
340
341 CFGState() = default;
342
343 /// Returns the BasicBlock* mapped to the pre-header of the loop region
344 /// containing \p R.
345 BasicBlock *getPreheaderBBFor(VPRecipeBase *R);
346 } CFG;
347
348 /// Hold a pointer to LoopInfo to register new basic blocks in the loop.
349 LoopInfo *LI;
350
351 /// Hold a pointer to Dominator Tree to register new basic blocks in the loop.
352 DominatorTree *DT;
353
354 /// Hold a reference to the IRBuilder used to generate output IR code.
355 IRBuilderBase &Builder;
356
357 VPValue2ValueTy VPValue2Value;
358
359 /// Hold the canonical scalar IV of the vector loop (start=0, step=VF*UF).
360 Value *CanonicalIV = nullptr;
361
362 /// Hold a pointer to InnerLoopVectorizer to reuse its IR generation methods.
363 InnerLoopVectorizer *ILV;
364
365 /// Pointer to the VPlan code is generated for.
366 VPlan *Plan;
367
368 /// Holds recipes that may generate a poison value that is used after
369 /// vectorization, even when their operands are not poison.
370 SmallPtrSet<VPRecipeBase *, 16> MayGeneratePoisonRecipes;
371
372 /// The loop object for the current parent region, or nullptr.
373 Loop *CurrentVectorLoop = nullptr;
374
375 /// LoopVersioning. It's only set up (non-null) if memchecks were
376 /// used.
377 ///
378 /// This is currently only used to add no-alias metadata based on the
379 /// memchecks. The actually versioning is performed manually.
380 std::unique_ptr<LoopVersioning> LVer;
381 };
382
383 /// VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
384 /// A VPBlockBase can be either a VPBasicBlock or a VPRegionBlock.
385 class VPBlockBase {
386 friend class VPBlockUtils;
387
388 const unsigned char SubclassID; ///< Subclass identifier (for isa/dyn_cast).
389
390 /// An optional name for the block.
391 std::string Name;
392
393 /// The immediate VPRegionBlock which this VPBlockBase belongs to, or null if
394 /// it is a topmost VPBlockBase.
395 VPRegionBlock *Parent = nullptr;
396
397 /// List of predecessor blocks.
398 SmallVector<VPBlockBase *, 1> Predecessors;
399
400 /// List of successor blocks.
401 SmallVector<VPBlockBase *, 1> Successors;
402
403 /// VPlan containing the block. Can only be set on the entry block of the
404 /// plan.
405 VPlan *Plan = nullptr;
406
407 /// Add \p Successor as the last successor to this block.
appendSuccessor(VPBlockBase * Successor)408 void appendSuccessor(VPBlockBase *Successor) {
409 assert(Successor && "Cannot add nullptr successor!");
410 Successors.push_back(Successor);
411 }
412
413 /// Add \p Predecessor as the last predecessor to this block.
appendPredecessor(VPBlockBase * Predecessor)414 void appendPredecessor(VPBlockBase *Predecessor) {
415 assert(Predecessor && "Cannot add nullptr predecessor!");
416 Predecessors.push_back(Predecessor);
417 }
418
419 /// Remove \p Predecessor from the predecessors of this block.
removePredecessor(VPBlockBase * Predecessor)420 void removePredecessor(VPBlockBase *Predecessor) {
421 auto Pos = find(Predecessors, Predecessor);
422 assert(Pos && "Predecessor does not exist");
423 Predecessors.erase(Pos);
424 }
425
426 /// Remove \p Successor from the successors of this block.
removeSuccessor(VPBlockBase * Successor)427 void removeSuccessor(VPBlockBase *Successor) {
428 auto Pos = find(Successors, Successor);
429 assert(Pos && "Successor does not exist");
430 Successors.erase(Pos);
431 }
432
433 protected:
VPBlockBase(const unsigned char SC,const std::string & N)434 VPBlockBase(const unsigned char SC, const std::string &N)
435 : SubclassID(SC), Name(N) {}
436
437 public:
438 /// An enumeration for keeping track of the concrete subclass of VPBlockBase
439 /// that are actually instantiated. Values of this enumeration are kept in the
440 /// SubclassID field of the VPBlockBase objects. They are used for concrete
441 /// type identification.
442 using VPBlockTy = enum { VPBasicBlockSC, VPRegionBlockSC };
443
444 using VPBlocksTy = SmallVectorImpl<VPBlockBase *>;
445
446 virtual ~VPBlockBase() = default;
447
getName()448 const std::string &getName() const { return Name; }
449
setName(const Twine & newName)450 void setName(const Twine &newName) { Name = newName.str(); }
451
452 /// \return an ID for the concrete type of this object.
453 /// This is used to implement the classof checks. This should not be used
454 /// for any other purpose, as the values may change as LLVM evolves.
getVPBlockID()455 unsigned getVPBlockID() const { return SubclassID; }
456
getParent()457 VPRegionBlock *getParent() { return Parent; }
getParent()458 const VPRegionBlock *getParent() const { return Parent; }
459
460 /// \return A pointer to the plan containing the current block.
461 VPlan *getPlan();
462 const VPlan *getPlan() const;
463
464 /// Sets the pointer of the plan containing the block. The block must be the
465 /// entry block into the VPlan.
466 void setPlan(VPlan *ParentPlan);
467
setParent(VPRegionBlock * P)468 void setParent(VPRegionBlock *P) { Parent = P; }
469
470 /// \return the VPBasicBlock that is the entry of this VPBlockBase,
471 /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
472 /// VPBlockBase is a VPBasicBlock, it is returned.
473 const VPBasicBlock *getEntryBasicBlock() const;
474 VPBasicBlock *getEntryBasicBlock();
475
476 /// \return the VPBasicBlock that is the exiting this VPBlockBase,
477 /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
478 /// VPBlockBase is a VPBasicBlock, it is returned.
479 const VPBasicBlock *getExitingBasicBlock() const;
480 VPBasicBlock *getExitingBasicBlock();
481
getSuccessors()482 const VPBlocksTy &getSuccessors() const { return Successors; }
getSuccessors()483 VPBlocksTy &getSuccessors() { return Successors; }
484
successors()485 iterator_range<VPBlockBase **> successors() { return Successors; }
486
getPredecessors()487 const VPBlocksTy &getPredecessors() const { return Predecessors; }
getPredecessors()488 VPBlocksTy &getPredecessors() { return Predecessors; }
489
490 /// \return the successor of this VPBlockBase if it has a single successor.
491 /// Otherwise return a null pointer.
getSingleSuccessor()492 VPBlockBase *getSingleSuccessor() const {
493 return (Successors.size() == 1 ? *Successors.begin() : nullptr);
494 }
495
496 /// \return the predecessor of this VPBlockBase if it has a single
497 /// predecessor. Otherwise return a null pointer.
getSinglePredecessor()498 VPBlockBase *getSinglePredecessor() const {
499 return (Predecessors.size() == 1 ? *Predecessors.begin() : nullptr);
500 }
501
getNumSuccessors()502 size_t getNumSuccessors() const { return Successors.size(); }
getNumPredecessors()503 size_t getNumPredecessors() const { return Predecessors.size(); }
504
505 /// An Enclosing Block of a block B is any block containing B, including B
506 /// itself. \return the closest enclosing block starting from "this", which
507 /// has successors. \return the root enclosing block if all enclosing blocks
508 /// have no successors.
509 VPBlockBase *getEnclosingBlockWithSuccessors();
510
511 /// \return the closest enclosing block starting from "this", which has
512 /// predecessors. \return the root enclosing block if all enclosing blocks
513 /// have no predecessors.
514 VPBlockBase *getEnclosingBlockWithPredecessors();
515
516 /// \return the successors either attached directly to this VPBlockBase or, if
517 /// this VPBlockBase is the exit block of a VPRegionBlock and has no
518 /// successors of its own, search recursively for the first enclosing
519 /// VPRegionBlock that has successors and return them. If no such
520 /// VPRegionBlock exists, return the (empty) successors of the topmost
521 /// VPBlockBase reached.
getHierarchicalSuccessors()522 const VPBlocksTy &getHierarchicalSuccessors() {
523 return getEnclosingBlockWithSuccessors()->getSuccessors();
524 }
525
526 /// \return the hierarchical successor of this VPBlockBase if it has a single
527 /// hierarchical successor. Otherwise return a null pointer.
getSingleHierarchicalSuccessor()528 VPBlockBase *getSingleHierarchicalSuccessor() {
529 return getEnclosingBlockWithSuccessors()->getSingleSuccessor();
530 }
531
532 /// \return the predecessors either attached directly to this VPBlockBase or,
533 /// if this VPBlockBase is the entry block of a VPRegionBlock and has no
534 /// predecessors of its own, search recursively for the first enclosing
535 /// VPRegionBlock that has predecessors and return them. If no such
536 /// VPRegionBlock exists, return the (empty) predecessors of the topmost
537 /// VPBlockBase reached.
getHierarchicalPredecessors()538 const VPBlocksTy &getHierarchicalPredecessors() {
539 return getEnclosingBlockWithPredecessors()->getPredecessors();
540 }
541
542 /// \return the hierarchical predecessor of this VPBlockBase if it has a
543 /// single hierarchical predecessor. Otherwise return a null pointer.
getSingleHierarchicalPredecessor()544 VPBlockBase *getSingleHierarchicalPredecessor() {
545 return getEnclosingBlockWithPredecessors()->getSinglePredecessor();
546 }
547
548 /// Set a given VPBlockBase \p Successor as the single successor of this
549 /// VPBlockBase. This VPBlockBase is not added as predecessor of \p Successor.
550 /// This VPBlockBase must have no successors.
setOneSuccessor(VPBlockBase * Successor)551 void setOneSuccessor(VPBlockBase *Successor) {
552 assert(Successors.empty() && "Setting one successor when others exist.");
553 appendSuccessor(Successor);
554 }
555
556 /// Set two given VPBlockBases \p IfTrue and \p IfFalse to be the two
557 /// successors of this VPBlockBase. This VPBlockBase is not added as
558 /// predecessor of \p IfTrue or \p IfFalse. This VPBlockBase must have no
559 /// successors.
setTwoSuccessors(VPBlockBase * IfTrue,VPBlockBase * IfFalse)560 void setTwoSuccessors(VPBlockBase *IfTrue, VPBlockBase *IfFalse) {
561 assert(Successors.empty() && "Setting two successors when others exist.");
562 appendSuccessor(IfTrue);
563 appendSuccessor(IfFalse);
564 }
565
566 /// Set each VPBasicBlock in \p NewPreds as predecessor of this VPBlockBase.
567 /// This VPBlockBase must have no predecessors. This VPBlockBase is not added
568 /// as successor of any VPBasicBlock in \p NewPreds.
setPredecessors(ArrayRef<VPBlockBase * > NewPreds)569 void setPredecessors(ArrayRef<VPBlockBase *> NewPreds) {
570 assert(Predecessors.empty() && "Block predecessors already set.");
571 for (auto *Pred : NewPreds)
572 appendPredecessor(Pred);
573 }
574
575 /// Remove all the predecessor of this block.
clearPredecessors()576 void clearPredecessors() { Predecessors.clear(); }
577
578 /// Remove all the successors of this block.
clearSuccessors()579 void clearSuccessors() { Successors.clear(); }
580
581 /// The method which generates the output IR that correspond to this
582 /// VPBlockBase, thereby "executing" the VPlan.
583 virtual void execute(struct VPTransformState *State) = 0;
584
585 /// Delete all blocks reachable from a given VPBlockBase, inclusive.
586 static void deleteCFG(VPBlockBase *Entry);
587
588 /// Return true if it is legal to hoist instructions into this block.
isLegalToHoistInto()589 bool isLegalToHoistInto() {
590 // There are currently no constraints that prevent an instruction to be
591 // hoisted into a VPBlockBase.
592 return true;
593 }
594
595 /// Replace all operands of VPUsers in the block with \p NewValue and also
596 /// replaces all uses of VPValues defined in the block with NewValue.
597 virtual void dropAllReferences(VPValue *NewValue) = 0;
598
599 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
printAsOperand(raw_ostream & OS,bool PrintType)600 void printAsOperand(raw_ostream &OS, bool PrintType) const {
601 OS << getName();
602 }
603
604 /// Print plain-text dump of this VPBlockBase to \p O, prefixing all lines
605 /// with \p Indent. \p SlotTracker is used to print unnamed VPValue's using
606 /// consequtive numbers.
607 ///
608 /// Note that the numbering is applied to the whole VPlan, so printing
609 /// individual blocks is consistent with the whole VPlan printing.
610 virtual void print(raw_ostream &O, const Twine &Indent,
611 VPSlotTracker &SlotTracker) const = 0;
612
613 /// Print plain-text dump of this VPlan to \p O.
print(raw_ostream & O)614 void print(raw_ostream &O) const {
615 VPSlotTracker SlotTracker(getPlan());
616 print(O, "", SlotTracker);
617 }
618
619 /// Print the successors of this block to \p O, prefixing all lines with \p
620 /// Indent.
621 void printSuccessors(raw_ostream &O, const Twine &Indent) const;
622
623 /// Dump this VPBlockBase to dbgs().
dump()624 LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
625 #endif
626 };
627
628 /// A value that is used outside the VPlan. The operand of the user needs to be
629 /// added to the associated LCSSA phi node.
630 class VPLiveOut : public VPUser {
631 PHINode *Phi;
632
633 public:
VPLiveOut(PHINode * Phi,VPValue * Op)634 VPLiveOut(PHINode *Phi, VPValue *Op)
635 : VPUser({Op}, VPUser::VPUserID::LiveOut), Phi(Phi) {}
636
637 /// Fixup the wrapped LCSSA phi node in the unique exit block. This simply
638 /// means we need to add the appropriate incoming value from the middle
639 /// block as exiting edges from the scalar epilogue loop (if present) are
640 /// already in place, and we exit the vector loop exclusively to the middle
641 /// block.
642 void fixPhi(VPlan &Plan, VPTransformState &State);
643
644 /// Returns true if the VPLiveOut uses scalars of operand \p Op.
usesScalars(const VPValue * Op)645 bool usesScalars(const VPValue *Op) const override {
646 assert(is_contained(operands(), Op) &&
647 "Op must be an operand of the recipe");
648 return true;
649 }
650
getPhi()651 PHINode *getPhi() const { return Phi; }
652 };
653
654 /// VPRecipeBase is a base class modeling a sequence of one or more output IR
655 /// instructions. VPRecipeBase owns the the VPValues it defines through VPDef
656 /// and is responsible for deleting its defined values. Single-value
657 /// VPRecipeBases that also inherit from VPValue must make sure to inherit from
658 /// VPRecipeBase before VPValue.
659 class VPRecipeBase : public ilist_node_with_parent<VPRecipeBase, VPBasicBlock>,
660 public VPDef,
661 public VPUser {
662 friend VPBasicBlock;
663 friend class VPBlockUtils;
664
665 /// Each VPRecipe belongs to a single VPBasicBlock.
666 VPBasicBlock *Parent = nullptr;
667
668 public:
VPRecipeBase(const unsigned char SC,ArrayRef<VPValue * > Operands)669 VPRecipeBase(const unsigned char SC, ArrayRef<VPValue *> Operands)
670 : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}
671
672 template <typename IterT>
VPRecipeBase(const unsigned char SC,iterator_range<IterT> Operands)673 VPRecipeBase(const unsigned char SC, iterator_range<IterT> Operands)
674 : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}
675 virtual ~VPRecipeBase() = default;
676
677 /// \return the VPBasicBlock which this VPRecipe belongs to.
getParent()678 VPBasicBlock *getParent() { return Parent; }
getParent()679 const VPBasicBlock *getParent() const { return Parent; }
680
681 /// The method which generates the output IR instructions that correspond to
682 /// this VPRecipe, thereby "executing" the VPlan.
683 virtual void execute(struct VPTransformState &State) = 0;
684
685 /// Insert an unlinked recipe into a basic block immediately before
686 /// the specified recipe.
687 void insertBefore(VPRecipeBase *InsertPos);
688 /// Insert an unlinked recipe into \p BB immediately before the insertion
689 /// point \p IP;
690 void insertBefore(VPBasicBlock &BB, iplist<VPRecipeBase>::iterator IP);
691
692 /// Insert an unlinked Recipe into a basic block immediately after
693 /// the specified Recipe.
694 void insertAfter(VPRecipeBase *InsertPos);
695
696 /// Unlink this recipe from its current VPBasicBlock and insert it into
697 /// the VPBasicBlock that MovePos lives in, right after MovePos.
698 void moveAfter(VPRecipeBase *MovePos);
699
700 /// Unlink this recipe and insert into BB before I.
701 ///
702 /// \pre I is a valid iterator into BB.
703 void moveBefore(VPBasicBlock &BB, iplist<VPRecipeBase>::iterator I);
704
705 /// This method unlinks 'this' from the containing basic block, but does not
706 /// delete it.
707 void removeFromParent();
708
709 /// This method unlinks 'this' from the containing basic block and deletes it.
710 ///
711 /// \returns an iterator pointing to the element after the erased one
712 iplist<VPRecipeBase>::iterator eraseFromParent();
713
714 /// Returns the underlying instruction, if the recipe is a VPValue or nullptr
715 /// otherwise.
getUnderlyingInstr()716 Instruction *getUnderlyingInstr() {
717 return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
718 }
getUnderlyingInstr()719 const Instruction *getUnderlyingInstr() const {
720 return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
721 }
722
723 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)724 static inline bool classof(const VPDef *D) {
725 // All VPDefs are also VPRecipeBases.
726 return true;
727 }
728
classof(const VPUser * U)729 static inline bool classof(const VPUser *U) {
730 return U->getVPUserID() == VPUser::VPUserID::Recipe;
731 }
732
733 /// Returns true if the recipe may have side-effects.
734 bool mayHaveSideEffects() const;
735
736 /// Returns true for PHI-like recipes.
isPhi()737 bool isPhi() const {
738 return getVPDefID() >= VPFirstPHISC && getVPDefID() <= VPLastPHISC;
739 }
740
741 /// Returns true if the recipe may read from memory.
742 bool mayReadFromMemory() const;
743
744 /// Returns true if the recipe may write to memory.
745 bool mayWriteToMemory() const;
746
747 /// Returns true if the recipe may read from or write to memory.
mayReadOrWriteMemory()748 bool mayReadOrWriteMemory() const {
749 return mayReadFromMemory() || mayWriteToMemory();
750 }
751 };
752
classof(const VPDef * Def)753 inline bool VPUser::classof(const VPDef *Def) {
754 return Def->getVPDefID() == VPRecipeBase::VPInstructionSC ||
755 Def->getVPDefID() == VPRecipeBase::VPWidenSC ||
756 Def->getVPDefID() == VPRecipeBase::VPWidenCallSC ||
757 Def->getVPDefID() == VPRecipeBase::VPWidenSelectSC ||
758 Def->getVPDefID() == VPRecipeBase::VPWidenGEPSC ||
759 Def->getVPDefID() == VPRecipeBase::VPBlendSC ||
760 Def->getVPDefID() == VPRecipeBase::VPInterleaveSC ||
761 Def->getVPDefID() == VPRecipeBase::VPReplicateSC ||
762 Def->getVPDefID() == VPRecipeBase::VPReductionSC ||
763 Def->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC ||
764 Def->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
765 }
766
/// This is a concrete Recipe that models a single VPlan-level instruction.
/// While as any Recipe it may generate a sequence of IR instructions when
/// executed, these instructions would always form a single-def expression as
/// the VPInstruction is also a single def-use vertex.
class VPInstruction : public VPRecipeBase, public VPValue {
  friend class VPlanSlp;

public:
  /// VPlan opcodes, extending LLVM IR with idiomatic instructions.
  enum {
    FirstOrderRecurrenceSplice =
        Instruction::OtherOpsEnd + 1, // Combines the incoming and previous
                                      // values of a first-order recurrence.
    Not,
    ICmpULE,
    SLPLoad,
    SLPStore,
    ActiveLaneMask,
    CanonicalIVIncrement,
    CanonicalIVIncrementNUW,
    // The next two are similar to the above, but instead increment the
    // canonical IV separately for each unrolled part.
    CanonicalIVIncrementForPart,
    CanonicalIVIncrementForPartNUW,
    BranchOnCount,
    BranchOnCond
  };

private:
  typedef unsigned char OpcodeTy;
  /// The (possibly VPlan-specific) opcode modeled by this instruction.
  OpcodeTy Opcode;
  /// Fast-math flags to apply to the generated IR, set via setFastMathFlags().
  FastMathFlags FMF;
  /// Debug location to attach to the generated IR instruction.
  DebugLoc DL;

  /// An optional name that can be used for the generated IR instruction.
  const std::string Name;

  /// Utility method serving execute(): generates a single instance of the
  /// modeled instruction.
  void generateInstruction(VPTransformState &State, unsigned Part);

protected:
  void setUnderlyingInstr(Instruction *I) { setUnderlyingValue(I); }

public:
  VPInstruction(unsigned Opcode, ArrayRef<VPValue *> Operands, DebugLoc DL,
                const Twine &Name = "")
      : VPRecipeBase(VPRecipeBase::VPInstructionSC, Operands),
        VPValue(VPValue::VPVInstructionSC, nullptr, this), Opcode(Opcode),
        DL(DL), Name(Name.str()) {}

  VPInstruction(unsigned Opcode, std::initializer_list<VPValue *> Operands,
                DebugLoc DL = {}, const Twine &Name = "")
      : VPInstruction(Opcode, ArrayRef<VPValue *>(Operands), DL, Name) {}

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVInstructionSC;
  }

  /// Create a copy of this VPInstruction with the same opcode, operands, debug
  /// location and name. Note that the underlying value, if any, is not copied.
  VPInstruction *clone() const {
    SmallVector<VPValue *, 2> Operands(operands());
    return new VPInstruction(Opcode, Operands, DL, Name);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *R) {
    return R->getVPDefID() == VPRecipeBase::VPInstructionSC;
  }

  /// Extra classof implementations to allow directly casting from VPUser ->
  /// VPInstruction.
  static inline bool classof(const VPUser *U) {
    auto *R = dyn_cast<VPRecipeBase>(U);
    return R && R->getVPDefID() == VPRecipeBase::VPInstructionSC;
  }
  static inline bool classof(const VPRecipeBase *R) {
    return R->getVPDefID() == VPRecipeBase::VPInstructionSC;
  }

  unsigned getOpcode() const { return Opcode; }

  /// Generate the instruction.
  /// TODO: We currently execute only per-part unless a specific instance is
  /// provided.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the VPInstruction to \p O.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;

  /// Print the VPInstruction to dbgs() (for debugging).
  LLVM_DUMP_METHOD void dump() const;
#endif

  /// Return true if this instruction may modify memory.
  bool mayWriteToMemory() const {
    // TODO: we can use attributes of the called function to rule out memory
    // modifications.
    return Opcode == Instruction::Store || Opcode == Instruction::Call ||
           Opcode == Instruction::Invoke || Opcode == SLPStore;
  }

  /// Returns true if this VPInstruction produces a value; opcodes that only
  /// have side-effects or transfer control return false.
  bool hasResult() const {
    // CallInst may or may not have a result, depending on the called function.
    // Conservatively assume calls have results for now.
    switch (getOpcode()) {
    case Instruction::Ret:
    case Instruction::Br:
    case Instruction::Store:
    case Instruction::Switch:
    case Instruction::IndirectBr:
    case Instruction::Resume:
    case Instruction::CatchRet:
    case Instruction::Unreachable:
    case Instruction::Fence:
    case Instruction::AtomicRMW:
    case VPInstruction::BranchOnCond:
    case VPInstruction::BranchOnCount:
      return false;
    default:
      return true;
    }
  }

  /// Set the fast-math flags.
  void setFastMathFlags(FastMathFlags FMFNew);

  /// Returns true if the recipe only uses the first lane of operand \p Op.
  bool onlyFirstLaneUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    // Only the first operand of the listed opcodes is known to be used as a
    // scalar (lane 0) value; all other opcodes/operands are conservatively
    // treated as using all lanes.
    if (getOperand(0) != Op)
      return false;
    switch (getOpcode()) {
    default:
      return false;
    case VPInstruction::ActiveLaneMask:
    case VPInstruction::CanonicalIVIncrement:
    case VPInstruction::CanonicalIVIncrementNUW:
    case VPInstruction::CanonicalIVIncrementForPart:
    case VPInstruction::CanonicalIVIncrementForPartNUW:
    case VPInstruction::BranchOnCount:
      return true;
    };
    llvm_unreachable("switch should return");
  }
};
916
/// VPWidenRecipe is a recipe for producing a copy of vector type its
/// ingredient. This recipe covers most of the traditional vectorization cases
/// where each ingredient transforms into a vectorized version of itself.
class VPWidenRecipe : public VPRecipeBase, public VPValue {
public:
  /// Build a widening recipe for ingredient \p I using the given \p Operands.
  template <typename IterT>
  VPWidenRecipe(Instruction &I, iterator_range<IterT> Operands)
      : VPRecipeBase(VPRecipeBase::VPWidenSC, Operands),
        VPValue(VPValue::VPVWidenSC, &I, this) {}

  ~VPWidenRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenSC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVWidenSC;
  }

  /// Produce widened copies of all Ingredients.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
946
/// A recipe for widening Call instructions.
class VPWidenCallRecipe : public VPRecipeBase, public VPValue {

public:
  /// Build a widening recipe for call \p I; the widened call will use
  /// \p CallArguments as its (VPValue) arguments.
  template <typename IterT>
  VPWidenCallRecipe(CallInst &I, iterator_range<IterT> CallArguments)
      : VPRecipeBase(VPRecipeBase::VPWidenCallSC, CallArguments),
        VPValue(VPValue::VPVWidenCallSC, &I, this) {}

  ~VPWidenCallRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenCallSC;
  }

  /// Produce a widened version of the call instruction.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
972
/// A recipe for widening select instructions.
class VPWidenSelectRecipe : public VPRecipeBase, public VPValue {

  /// Is the condition of the select loop invariant?
  bool InvariantCond;

public:
  /// Build a widening recipe for select \p I with operands \p Operands.
  /// \p InvariantCond records whether the select's condition is loop
  /// invariant.
  template <typename IterT>
  VPWidenSelectRecipe(SelectInst &I, iterator_range<IterT> Operands,
                      bool InvariantCond)
      : VPRecipeBase(VPRecipeBase::VPWidenSelectSC, Operands),
        VPValue(VPValue::VPVWidenSelectSC, &I, this),
        InvariantCond(InvariantCond) {}

  ~VPWidenSelectRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenSelectSC;
  }

  /// Produce a widened version of the select instruction.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1003
1004 /// A recipe for handling GEP instructions.
1005 class VPWidenGEPRecipe : public VPRecipeBase, public VPValue {
1006 bool IsPtrLoopInvariant;
1007 SmallBitVector IsIndexLoopInvariant;
1008
1009 public:
1010 template <typename IterT>
VPWidenGEPRecipe(GetElementPtrInst * GEP,iterator_range<IterT> Operands)1011 VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands)
1012 : VPRecipeBase(VPRecipeBase::VPWidenGEPSC, Operands),
1013 VPValue(VPWidenGEPSC, GEP, this),
1014 IsIndexLoopInvariant(GEP->getNumIndices(), false) {}
1015
1016 template <typename IterT>
VPWidenGEPRecipe(GetElementPtrInst * GEP,iterator_range<IterT> Operands,Loop * OrigLoop)1017 VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands,
1018 Loop *OrigLoop)
1019 : VPRecipeBase(VPRecipeBase::VPWidenGEPSC, Operands),
1020 VPValue(VPValue::VPVWidenGEPSC, GEP, this),
1021 IsIndexLoopInvariant(GEP->getNumIndices(), false) {
1022 IsPtrLoopInvariant = OrigLoop->isLoopInvariant(GEP->getPointerOperand());
1023 for (auto Index : enumerate(GEP->indices()))
1024 IsIndexLoopInvariant[Index.index()] =
1025 OrigLoop->isLoopInvariant(Index.value().get());
1026 }
1027 ~VPWidenGEPRecipe() override = default;
1028
1029 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)1030 static inline bool classof(const VPDef *D) {
1031 return D->getVPDefID() == VPRecipeBase::VPWidenGEPSC;
1032 }
1033
1034 /// Generate the gep nodes.
1035 void execute(VPTransformState &State) override;
1036
1037 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1038 /// Print the recipe.
1039 void print(raw_ostream &O, const Twine &Indent,
1040 VPSlotTracker &SlotTracker) const override;
1041 #endif
1042 };
1043
/// A recipe for handling phi nodes of integer and floating-point inductions,
/// producing their vector values.
class VPWidenIntOrFpInductionRecipe : public VPRecipeBase, public VPValue {
  /// The original induction phi in the scalar loop.
  PHINode *IV;
  /// Descriptor of the modeled induction.
  const InductionDescriptor &IndDesc;
  /// Whether a vector phi must be created for this induction.
  bool NeedsVectorIV;

public:
  VPWidenIntOrFpInductionRecipe(PHINode *IV, VPValue *Start, VPValue *Step,
                                const InductionDescriptor &IndDesc,
                                bool NeedsVectorIV)
      : VPRecipeBase(VPWidenIntOrFpInductionSC, {Start, Step}),
        VPValue(IV, this), IV(IV), IndDesc(IndDesc),
        NeedsVectorIV(NeedsVectorIV) {}

  /// As above, but the defined VPValue is based on the truncation \p Trunc of
  /// the induction rather than the phi itself.
  VPWidenIntOrFpInductionRecipe(PHINode *IV, VPValue *Start, VPValue *Step,
                                const InductionDescriptor &IndDesc,
                                TruncInst *Trunc, bool NeedsVectorIV)
      : VPRecipeBase(VPWidenIntOrFpInductionSC, {Start, Step}),
        VPValue(Trunc, this), IV(IV), IndDesc(IndDesc),
        NeedsVectorIV(NeedsVectorIV) {}

  ~VPWidenIntOrFpInductionRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenIntOrFpInductionSC;
  }

  /// Generate the vectorized and scalarized versions of the phi node as
  /// needed by their users.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the start value of the induction.
  VPValue *getStartValue() { return getOperand(0); }
  const VPValue *getStartValue() const { return getOperand(0); }

  /// Returns the step value of the induction.
  VPValue *getStepValue() { return getOperand(1); }
  const VPValue *getStepValue() const { return getOperand(1); }

  /// Returns the first defined value as TruncInst, if it is one or nullptr
  /// otherwise.
  TruncInst *getTruncInst() {
    return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
  }
  const TruncInst *getTruncInst() const {
    return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
  }

  /// Returns the original induction phi in the scalar loop.
  PHINode *getPHINode() { return IV; }

  /// Returns the induction descriptor for the recipe.
  const InductionDescriptor &getInductionDescriptor() const { return IndDesc; }

  /// Returns true if the induction is canonical, i.e. starting at 0 and
  /// incremented by UF * VF (= the original IV is incremented by 1).
  bool isCanonical() const;

  /// Returns the scalar type of the induction: the truncated type if a
  /// truncation is modeled, otherwise the phi's type.
  const Type *getScalarType() const {
    const TruncInst *TruncI = getTruncInst();
    return TruncI ? TruncI->getType() : IV->getType();
  }

  /// Returns true if a vector phi needs to be created for the induction.
  bool needsVectorIV() const { return NeedsVectorIV; }
};
1118
/// A pure virtual base class for all recipes modeling header phis, including
/// phis for first order recurrences, pointer inductions and reductions. The
/// start value is the first operand of the recipe and the incoming value from
/// the backedge is the second operand.
class VPHeaderPHIRecipe : public VPRecipeBase, public VPValue {
protected:
  /// Create a header-phi recipe with VPValue ID \p VPVID and VPDef ID
  /// \p VPDefID for phi \p Phi, optionally adding \p Start as first operand.
  VPHeaderPHIRecipe(unsigned char VPVID, unsigned char VPDefID, PHINode *Phi,
                    VPValue *Start = nullptr)
      : VPRecipeBase(VPDefID, {}), VPValue(VPVID, Phi, this) {
    if (Start)
      addOperand(Start);
  }

public:
  ~VPHeaderPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *B) {
    return B->getVPDefID() == VPRecipeBase::VPCanonicalIVPHISC ||
           B->getVPDefID() == VPRecipeBase::VPActiveLaneMaskPHISC ||
           B->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC ||
           B->getVPDefID() == VPRecipeBase::VPReductionPHISC ||
           B->getVPDefID() == VPRecipeBase::VPWidenIntOrFpInductionSC ||
           B->getVPDefID() == VPRecipeBase::VPWidenPHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVCanonicalIVPHISC ||
           V->getVPValueID() == VPValue::VPVActiveLaneMaskPHISC ||
           V->getVPValueID() == VPValue::VPVFirstOrderRecurrencePHISC ||
           V->getVPValueID() == VPValue::VPVReductionPHISC ||
           V->getVPValueID() == VPValue::VPVWidenIntOrFpInductionSC ||
           V->getVPValueID() == VPValue::VPVWidenPHISC;
  }

  /// Generate the phi nodes.
  void execute(VPTransformState &State) override = 0;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override = 0;
#endif

  /// Returns the start value of the phi, if one is set.
  VPValue *getStartValue() {
    return getNumOperands() == 0 ? nullptr : getOperand(0);
  }
  VPValue *getStartValue() const {
    return getNumOperands() == 0 ? nullptr : getOperand(0);
  }

  /// Returns the incoming value from the loop backedge.
  VPValue *getBackedgeValue() {
    return getOperand(1);
  }

  /// Returns the backedge value as a recipe. The backedge value is guaranteed
  /// to be a recipe.
  VPRecipeBase *getBackedgeRecipe() {
    return cast<VPRecipeBase>(getBackedgeValue()->getDef());
  }
};
1181
/// A recipe modeling header phis for widened pointer inductions.
class VPWidenPointerInductionRecipe : public VPHeaderPHIRecipe {
  /// Descriptor of the modeled pointer induction.
  const InductionDescriptor &IndDesc;

  /// SCEV used to expand step.
  /// FIXME: move expansion of step to the pre-header, once it is modeled
  /// explicitly.
  ScalarEvolution &SE;

  /// Whether only scalar values (per-lane pointers) will be needed.
  bool IsScalarAfterVectorization;

public:
  /// Create a new VPWidenPointerInductionRecipe for \p Phi with start value \p
  /// Start.
  VPWidenPointerInductionRecipe(PHINode *Phi, VPValue *Start,
                                const InductionDescriptor &IndDesc,
                                ScalarEvolution &SE,
                                bool IsScalarAfterVectorization)
      : VPHeaderPHIRecipe(VPVWidenPointerInductionSC, VPWidenPointerInductionSC,
                          Phi),
        IndDesc(IndDesc), SE(SE),
        IsScalarAfterVectorization(IsScalarAfterVectorization) {
    addOperand(Start);
  }

  ~VPWidenPointerInductionRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *B) {
    return B->getVPDefID() == VPRecipeBase::VPWidenPointerInductionSC;
  }
  static inline bool classof(const VPHeaderPHIRecipe *R) {
    return R->getVPDefID() == VPRecipeBase::VPWidenPointerInductionSC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVWidenPointerInductionSC;
  }

  /// Generate vector values for the pointer induction.
  void execute(VPTransformState &State) override;

  /// Returns true if only scalar values will be generated.
  bool onlyScalarsGenerated(ElementCount VF);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1231
/// A recipe for handling header phis that are widened in the vector loop.
/// In the VPlan native path, all incoming VPValues & VPBasicBlock pairs are
/// managed in the recipe directly.
class VPWidenPHIRecipe : public VPHeaderPHIRecipe {
  /// List of incoming blocks. Only used in the VPlan native path. The I-th
  /// block corresponds to the I-th operand (incoming value).
  SmallVector<VPBasicBlock *, 2> IncomingBlocks;

public:
  /// Create a new VPWidenPHIRecipe for \p Phi with start value \p Start.
  VPWidenPHIRecipe(PHINode *Phi, VPValue *Start = nullptr)
      : VPHeaderPHIRecipe(VPVWidenPHISC, VPWidenPHISC, Phi) {
    if (Start)
      addOperand(Start);
  }

  ~VPWidenPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *B) {
    return B->getVPDefID() == VPRecipeBase::VPWidenPHISC;
  }
  static inline bool classof(const VPHeaderPHIRecipe *R) {
    return R->getVPDefID() == VPRecipeBase::VPWidenPHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVWidenPHISC;
  }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Adds a pair (\p IncomingV, \p IncomingBlock) to the phi.
  void addIncoming(VPValue *IncomingV, VPBasicBlock *IncomingBlock) {
    addOperand(IncomingV);
    IncomingBlocks.push_back(IncomingBlock);
  }

  /// Returns the \p I th incoming VPBasicBlock.
  VPBasicBlock *getIncomingBlock(unsigned I) { return IncomingBlocks[I]; }

  /// Returns the \p I th incoming VPValue.
  VPValue *getIncomingValue(unsigned I) { return getOperand(I); }
};
1281
/// A recipe for handling first-order recurrence phis. The start value is the
/// first operand of the recipe and the incoming value from the backedge is the
/// second operand.
struct VPFirstOrderRecurrencePHIRecipe : public VPHeaderPHIRecipe {
  VPFirstOrderRecurrencePHIRecipe(PHINode *Phi, VPValue &Start)
      : VPHeaderPHIRecipe(VPVFirstOrderRecurrencePHISC,
                          VPFirstOrderRecurrencePHISC, Phi, &Start) {}

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *R) {
    return R->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC;
  }
  static inline bool classof(const VPHeaderPHIRecipe *R) {
    return R->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVFirstOrderRecurrencePHISC;
  }

  /// Generate the phi nodes for the recurrence.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1309
/// A recipe for handling reduction phis. The start value is the first operand
/// of the recipe and the incoming value from the backedge is the second
/// operand.
class VPReductionPHIRecipe : public VPHeaderPHIRecipe {
  /// Descriptor for the reduction.
  const RecurrenceDescriptor &RdxDesc;

  /// The phi is part of an in-loop reduction.
  bool IsInLoop;

  /// The phi is part of an ordered reduction. Requires IsInLoop to be true.
  bool IsOrdered;

public:
  /// Create a new VPReductionPHIRecipe for the reduction \p Phi described by \p
  /// RdxDesc.
  VPReductionPHIRecipe(PHINode *Phi, const RecurrenceDescriptor &RdxDesc,
                       VPValue &Start, bool IsInLoop = false,
                       bool IsOrdered = false)
      : VPHeaderPHIRecipe(VPVReductionPHISC, VPReductionPHISC, Phi, &Start),
        RdxDesc(RdxDesc), IsInLoop(IsInLoop), IsOrdered(IsOrdered) {
    assert((!IsOrdered || IsInLoop) && "IsOrdered requires IsInLoop");
  }

  ~VPReductionPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *R) {
    return R->getVPDefID() == VPRecipeBase::VPReductionPHISC;
  }
  static inline bool classof(const VPHeaderPHIRecipe *R) {
    return R->getVPDefID() == VPRecipeBase::VPReductionPHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReductionPHISC;
  }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the recurrence descriptor of the reduction.
  const RecurrenceDescriptor &getRecurrenceDescriptor() const {
    return RdxDesc;
  }

  /// Returns true, if the phi is part of an ordered reduction.
  bool isOrdered() const { return IsOrdered; }

  /// Returns true, if the phi is part of an in-loop reduction.
  bool isInLoop() const { return IsInLoop; }
};
1366
/// A recipe for vectorizing a phi-node as a sequence of mask-based select
/// instructions.
class VPBlendRecipe : public VPRecipeBase, public VPValue {
  /// The original phi being blended.
  PHINode *Phi;

public:
  /// The blend operation is a User of the incoming values and of their
  /// respective masks, ordered [I0, M0, I1, M1, ...]. Note that a single value
  /// might be incoming with a full mask for which there is no VPValue.
  VPBlendRecipe(PHINode *Phi, ArrayRef<VPValue *> Operands)
      : VPRecipeBase(VPBlendSC, Operands),
        VPValue(VPValue::VPVBlendSC, Phi, this), Phi(Phi) {
    assert(Operands.size() > 0 &&
           ((Operands.size() == 1) || (Operands.size() % 2 == 0)) &&
           "Expected either a single incoming value or a positive even number "
           "of operands");
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPBlendSC;
  }

  /// Return the number of incoming values, taking into account that a single
  /// incoming value has no mask.
  unsigned getNumIncomingValues() const { return (getNumOperands() + 1) / 2; }

  /// Return incoming value number \p Idx.
  VPValue *getIncomingValue(unsigned Idx) const { return getOperand(Idx * 2); }

  /// Return mask number \p Idx.
  VPValue *getMask(unsigned Idx) const { return getOperand(Idx * 2 + 1); }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns true if the recipe only uses the first lane of operand \p Op.
  bool onlyFirstLaneUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    // Recursing through Blend recipes only, must terminate at header phi's the
    // latest.
    return all_of(users(),
                  [this](VPUser *U) { return U->onlyFirstLaneUsed(this); });
  }
};
1419
/// VPInterleaveRecipe is a recipe for transforming an interleave group of load
/// or stores into one wide load/store and shuffles. The first operand of a
/// VPInterleave recipe is the address, followed by the stored values, followed
/// by an optional mask.
class VPInterleaveRecipe : public VPRecipeBase {
  /// The interleave group being widened.
  const InterleaveGroup<Instruction> *IG;

  /// Whether the last operand is a mask.
  bool HasMask = false;

public:
  VPInterleaveRecipe(const InterleaveGroup<Instruction> *IG, VPValue *Addr,
                     ArrayRef<VPValue *> StoredValues, VPValue *Mask)
      : VPRecipeBase(VPInterleaveSC, {Addr}), IG(IG) {
    // Define one VPValue per non-void member of the group, in member order.
    for (unsigned i = 0; i < IG->getFactor(); ++i)
      if (Instruction *I = IG->getMember(i)) {
        if (I->getType()->isVoidTy())
          continue;
        new VPValue(I, this);
      }

    for (auto *SV : StoredValues)
      addOperand(SV);
    if (Mask) {
      HasMask = true;
      addOperand(Mask);
    }
  }
  ~VPInterleaveRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPInterleaveSC;
  }

  /// Return the address accessed by this recipe.
  VPValue *getAddr() const {
    return getOperand(0); // Address is the 1st, mandatory operand.
  }

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    // Mask is optional and therefore the last, currently 2nd operand.
    return HasMask ? getOperand(getNumOperands() - 1) : nullptr;
  }

  /// Return the VPValues stored by this interleave group. If it is a load
  /// interleave group, return an empty ArrayRef.
  ArrayRef<VPValue *> getStoredValues() const {
    // The first operand is the address, followed by the stored values, followed
    // by an optional mask.
    return ArrayRef<VPValue *>(op_begin(), getNumOperands())
        .slice(1, getNumStoreOperands());
  }

  /// Generate the wide load or store, and shuffles.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the interleave group modeled by this recipe.
  const InterleaveGroup<Instruction> *getInterleaveGroup() { return IG; }

  /// Returns the number of stored operands of this interleave group. Returns 0
  /// for load interleave groups.
  unsigned getNumStoreOperands() const {
    return getNumOperands() - (HasMask ? 2 : 1);
  }

  /// The recipe only uses the first lane of the address.
  bool onlyFirstLaneUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    // Only the address is scalar-only; a value that is both the address and a
    // stored value must be treated as using all lanes.
    return Op == getAddr() && all_of(getStoredValues(), [Op](VPValue *StoredV) {
             return Op != StoredV;
           });
  }
};
1501
/// A recipe to represent inloop reduction operations, performing a reduction on
/// a vector operand into a scalar value, and adding the result to a chain.
/// The Operands are {ChainOp, VecOp, [Condition]}.
class VPReductionRecipe : public VPRecipeBase, public VPValue {
  /// The recurrence descriptor for the reduction in question.
  const RecurrenceDescriptor *RdxDesc;
  /// Pointer to the TTI, needed to create the target reduction
  const TargetTransformInfo *TTI;

public:
  VPReductionRecipe(const RecurrenceDescriptor *R, Instruction *I,
                    VPValue *ChainOp, VPValue *VecOp, VPValue *CondOp,
                    const TargetTransformInfo *TTI)
      : VPRecipeBase(VPRecipeBase::VPReductionSC, {ChainOp, VecOp}),
        VPValue(VPValue::VPVReductionSC, I, this), RdxDesc(R), TTI(TTI) {
    // The condition operand is optional; it is only added when present.
    if (CondOp)
      addOperand(CondOp);
  }

  ~VPReductionRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReductionSC;
  }

  /// Generate the reduction in the loop
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// The VPValue of the scalar Chain being accumulated.
  VPValue *getChainOp() const { return getOperand(0); }
  /// The VPValue of the vector value to be reduced.
  VPValue *getVecOp() const { return getOperand(1); }
  /// The VPValue of the condition for the block; nullptr when no condition
  /// operand was supplied.
  VPValue *getCondOp() const {
    return getNumOperands() > 2 ? getOperand(2) : nullptr;
  }
};
1546
1547 /// VPReplicateRecipe replicates a given instruction producing multiple scalar
1548 /// copies of the original scalar type, one per lane, instead of producing a
1549 /// single copy of widened type for all lanes. If the instruction is known to be
1550 /// uniform only one copy, per lane zero, will be generated.
1551 class VPReplicateRecipe : public VPRecipeBase, public VPValue {
1552 /// Indicator if only a single replica per lane is needed.
1553 bool IsUniform;
1554
1555 /// Indicator if the replicas are also predicated.
1556 bool IsPredicated;
1557
1558 /// Indicator if the scalar values should also be packed into a vector.
1559 bool AlsoPack;
1560
1561 public:
1562 template <typename IterT>
1563 VPReplicateRecipe(Instruction *I, iterator_range<IterT> Operands,
1564 bool IsUniform, bool IsPredicated = false)
VPRecipeBase(VPReplicateSC,Operands)1565 : VPRecipeBase(VPReplicateSC, Operands), VPValue(VPVReplicateSC, I, this),
1566 IsUniform(IsUniform), IsPredicated(IsPredicated) {
1567 // Retain the previous behavior of predicateInstructions(), where an
1568 // insert-element of a predicated instruction got hoisted into the
1569 // predicated basic block iff it was its only user. This is achieved by
1570 // having predicated instructions also pack their values into a vector by
1571 // default unless they have a replicated user which uses their scalar value.
1572 AlsoPack = IsPredicated && !I->use_empty();
1573 }
1574
1575 ~VPReplicateRecipe() override = default;
1576
1577 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)1578 static inline bool classof(const VPDef *D) {
1579 return D->getVPDefID() == VPRecipeBase::VPReplicateSC;
1580 }
1581
classof(const VPValue * V)1582 static inline bool classof(const VPValue *V) {
1583 return V->getVPValueID() == VPValue::VPVReplicateSC;
1584 }
1585
1586 /// Generate replicas of the desired Ingredient. Replicas will be generated
1587 /// for all parts and lanes unless a specific part and lane are specified in
1588 /// the \p State.
1589 void execute(VPTransformState &State) override;
1590
setAlsoPack(bool Pack)1591 void setAlsoPack(bool Pack) { AlsoPack = Pack; }
1592
1593 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1594 /// Print the recipe.
1595 void print(raw_ostream &O, const Twine &Indent,
1596 VPSlotTracker &SlotTracker) const override;
1597 #endif
1598
isUniform()1599 bool isUniform() const { return IsUniform; }
1600
isPacked()1601 bool isPacked() const { return AlsoPack; }
1602
isPredicated()1603 bool isPredicated() const { return IsPredicated; }
1604
1605 /// Returns true if the recipe only uses the first lane of operand \p Op.
onlyFirstLaneUsed(const VPValue * Op)1606 bool onlyFirstLaneUsed(const VPValue *Op) const override {
1607 assert(is_contained(operands(), Op) &&
1608 "Op must be an operand of the recipe");
1609 return isUniform();
1610 }
1611
1612 /// Returns true if the recipe uses scalars of operand \p Op.
usesScalars(const VPValue * Op)1613 bool usesScalars(const VPValue *Op) const override {
1614 assert(is_contained(operands(), Op) &&
1615 "Op must be an operand of the recipe");
1616 return true;
1617 }
1618 };
1619
1620 /// A recipe for generating conditional branches on the bits of a mask.
1621 class VPBranchOnMaskRecipe : public VPRecipeBase {
1622 public:
VPBranchOnMaskRecipe(VPValue * BlockInMask)1623 VPBranchOnMaskRecipe(VPValue *BlockInMask)
1624 : VPRecipeBase(VPBranchOnMaskSC, {}) {
1625 if (BlockInMask) // nullptr means all-one mask.
1626 addOperand(BlockInMask);
1627 }
1628
1629 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)1630 static inline bool classof(const VPDef *D) {
1631 return D->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC;
1632 }
1633
1634 /// Generate the extraction of the appropriate bit from the block mask and the
1635 /// conditional branch.
1636 void execute(VPTransformState &State) override;
1637
1638 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1639 /// Print the recipe.
print(raw_ostream & O,const Twine & Indent,VPSlotTracker & SlotTracker)1640 void print(raw_ostream &O, const Twine &Indent,
1641 VPSlotTracker &SlotTracker) const override {
1642 O << Indent << "BRANCH-ON-MASK ";
1643 if (VPValue *Mask = getMask())
1644 Mask->printAsOperand(O, SlotTracker);
1645 else
1646 O << " All-One";
1647 }
1648 #endif
1649
1650 /// Return the mask used by this recipe. Note that a full mask is represented
1651 /// by a nullptr.
getMask()1652 VPValue *getMask() const {
1653 assert(getNumOperands() <= 1 && "should have either 0 or 1 operands");
1654 // Mask is optional.
1655 return getNumOperands() == 1 ? getOperand(0) : nullptr;
1656 }
1657
1658 /// Returns true if the recipe uses scalars of operand \p Op.
usesScalars(const VPValue * Op)1659 bool usesScalars(const VPValue *Op) const override {
1660 assert(is_contained(operands(), Op) &&
1661 "Op must be an operand of the recipe");
1662 return true;
1663 }
1664 };
1665
1666 /// VPPredInstPHIRecipe is a recipe for generating the phi nodes needed when
1667 /// control converges back from a Branch-on-Mask. The phi nodes are needed in
1668 /// order to merge values that are set under such a branch and feed their uses.
1669 /// The phi nodes can be scalar or vector depending on the users of the value.
1670 /// This recipe works in concert with VPBranchOnMaskRecipe.
1671 class VPPredInstPHIRecipe : public VPRecipeBase, public VPValue {
1672 public:
1673 /// Construct a VPPredInstPHIRecipe given \p PredInst whose value needs a phi
1674 /// nodes after merging back from a Branch-on-Mask.
VPPredInstPHIRecipe(VPValue * PredV)1675 VPPredInstPHIRecipe(VPValue *PredV)
1676 : VPRecipeBase(VPPredInstPHISC, PredV),
1677 VPValue(VPValue::VPVPredInstPHI, nullptr, this) {}
1678 ~VPPredInstPHIRecipe() override = default;
1679
1680 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)1681 static inline bool classof(const VPDef *D) {
1682 return D->getVPDefID() == VPRecipeBase::VPPredInstPHISC;
1683 }
1684
1685 /// Generates phi nodes for live-outs as needed to retain SSA form.
1686 void execute(VPTransformState &State) override;
1687
1688 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1689 /// Print the recipe.
1690 void print(raw_ostream &O, const Twine &Indent,
1691 VPSlotTracker &SlotTracker) const override;
1692 #endif
1693
1694 /// Returns true if the recipe uses scalars of operand \p Op.
usesScalars(const VPValue * Op)1695 bool usesScalars(const VPValue *Op) const override {
1696 assert(is_contained(operands(), Op) &&
1697 "Op must be an operand of the recipe");
1698 return true;
1699 }
1700 };
1701
1702 /// A Recipe for widening load/store operations.
1703 /// The recipe uses the following VPValues:
1704 /// - For load: Address, optional mask
1705 /// - For store: Address, stored value, optional mask
1706 /// TODO: We currently execute only per-part unless a specific instance is
1707 /// provided.
1708 class VPWidenMemoryInstructionRecipe : public VPRecipeBase {
1709 Instruction &Ingredient;
1710
1711 // Whether the loaded-from / stored-to addresses are consecutive.
1712 bool Consecutive;
1713
1714 // Whether the consecutive loaded/stored addresses are in reverse order.
1715 bool Reverse;
1716
setMask(VPValue * Mask)1717 void setMask(VPValue *Mask) {
1718 if (!Mask)
1719 return;
1720 addOperand(Mask);
1721 }
1722
isMasked()1723 bool isMasked() const {
1724 return isStore() ? getNumOperands() == 3 : getNumOperands() == 2;
1725 }
1726
1727 public:
VPWidenMemoryInstructionRecipe(LoadInst & Load,VPValue * Addr,VPValue * Mask,bool Consecutive,bool Reverse)1728 VPWidenMemoryInstructionRecipe(LoadInst &Load, VPValue *Addr, VPValue *Mask,
1729 bool Consecutive, bool Reverse)
1730 : VPRecipeBase(VPWidenMemoryInstructionSC, {Addr}), Ingredient(Load),
1731 Consecutive(Consecutive), Reverse(Reverse) {
1732 assert((Consecutive || !Reverse) && "Reverse implies consecutive");
1733 new VPValue(VPValue::VPVMemoryInstructionSC, &Load, this);
1734 setMask(Mask);
1735 }
1736
VPWidenMemoryInstructionRecipe(StoreInst & Store,VPValue * Addr,VPValue * StoredValue,VPValue * Mask,bool Consecutive,bool Reverse)1737 VPWidenMemoryInstructionRecipe(StoreInst &Store, VPValue *Addr,
1738 VPValue *StoredValue, VPValue *Mask,
1739 bool Consecutive, bool Reverse)
1740 : VPRecipeBase(VPWidenMemoryInstructionSC, {Addr, StoredValue}),
1741 Ingredient(Store), Consecutive(Consecutive), Reverse(Reverse) {
1742 assert((Consecutive || !Reverse) && "Reverse implies consecutive");
1743 setMask(Mask);
1744 }
1745
1746 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)1747 static inline bool classof(const VPDef *D) {
1748 return D->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
1749 }
1750
1751 /// Return the address accessed by this recipe.
getAddr()1752 VPValue *getAddr() const {
1753 return getOperand(0); // Address is the 1st, mandatory operand.
1754 }
1755
1756 /// Return the mask used by this recipe. Note that a full mask is represented
1757 /// by a nullptr.
getMask()1758 VPValue *getMask() const {
1759 // Mask is optional and therefore the last operand.
1760 return isMasked() ? getOperand(getNumOperands() - 1) : nullptr;
1761 }
1762
1763 /// Returns true if this recipe is a store.
isStore()1764 bool isStore() const { return isa<StoreInst>(Ingredient); }
1765
1766 /// Return the address accessed by this recipe.
getStoredValue()1767 VPValue *getStoredValue() const {
1768 assert(isStore() && "Stored value only available for store instructions");
1769 return getOperand(1); // Stored value is the 2nd, mandatory operand.
1770 }
1771
1772 // Return whether the loaded-from / stored-to addresses are consecutive.
isConsecutive()1773 bool isConsecutive() const { return Consecutive; }
1774
1775 // Return whether the consecutive loaded/stored addresses are in reverse
1776 // order.
isReverse()1777 bool isReverse() const { return Reverse; }
1778
1779 /// Generate the wide load/store.
1780 void execute(VPTransformState &State) override;
1781
1782 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1783 /// Print the recipe.
1784 void print(raw_ostream &O, const Twine &Indent,
1785 VPSlotTracker &SlotTracker) const override;
1786 #endif
1787
1788 /// Returns true if the recipe only uses the first lane of operand \p Op.
onlyFirstLaneUsed(const VPValue * Op)1789 bool onlyFirstLaneUsed(const VPValue *Op) const override {
1790 assert(is_contained(operands(), Op) &&
1791 "Op must be an operand of the recipe");
1792
1793 // Widened, consecutive memory operations only demand the first lane of
1794 // their address, unless the same operand is also stored. That latter can
1795 // happen with opaque pointers.
1796 return Op == getAddr() && isConsecutive() &&
1797 (!isStore() || Op != getStoredValue());
1798 }
1799
getIngredient()1800 Instruction &getIngredient() const { return Ingredient; }
1801 };
1802
1803 /// Recipe to expand a SCEV expression.
1804 class VPExpandSCEVRecipe : public VPRecipeBase, public VPValue {
1805 const SCEV *Expr;
1806 ScalarEvolution &SE;
1807
1808 public:
VPExpandSCEVRecipe(const SCEV * Expr,ScalarEvolution & SE)1809 VPExpandSCEVRecipe(const SCEV *Expr, ScalarEvolution &SE)
1810 : VPRecipeBase(VPExpandSCEVSC, {}), VPValue(nullptr, this), Expr(Expr),
1811 SE(SE) {}
1812
1813 ~VPExpandSCEVRecipe() override = default;
1814
1815 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)1816 static inline bool classof(const VPDef *D) {
1817 return D->getVPDefID() == VPExpandSCEVSC;
1818 }
1819
1820 /// Generate a canonical vector induction variable of the vector loop, with
1821 void execute(VPTransformState &State) override;
1822
1823 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1824 /// Print the recipe.
1825 void print(raw_ostream &O, const Twine &Indent,
1826 VPSlotTracker &SlotTracker) const override;
1827 #endif
1828
getSCEV()1829 const SCEV *getSCEV() const { return Expr; }
1830 };
1831
1832 /// Canonical scalar induction phi of the vector loop. Starting at the specified
1833 /// start value (either 0 or the resume value when vectorizing the epilogue
1834 /// loop). VPWidenCanonicalIVRecipe represents the vector version of the
1835 /// canonical induction variable.
1836 class VPCanonicalIVPHIRecipe : public VPHeaderPHIRecipe {
1837 DebugLoc DL;
1838
1839 public:
VPCanonicalIVPHIRecipe(VPValue * StartV,DebugLoc DL)1840 VPCanonicalIVPHIRecipe(VPValue *StartV, DebugLoc DL)
1841 : VPHeaderPHIRecipe(VPValue::VPVCanonicalIVPHISC, VPCanonicalIVPHISC,
1842 nullptr, StartV),
1843 DL(DL) {}
1844
1845 ~VPCanonicalIVPHIRecipe() override = default;
1846
1847 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)1848 static inline bool classof(const VPDef *D) {
1849 return D->getVPDefID() == VPCanonicalIVPHISC;
1850 }
classof(const VPHeaderPHIRecipe * D)1851 static inline bool classof(const VPHeaderPHIRecipe *D) {
1852 return D->getVPDefID() == VPCanonicalIVPHISC;
1853 }
classof(const VPValue * V)1854 static inline bool classof(const VPValue *V) {
1855 return V->getVPValueID() == VPValue::VPVCanonicalIVPHISC;
1856 }
1857
1858 /// Generate the canonical scalar induction phi of the vector loop.
1859 void execute(VPTransformState &State) override;
1860
1861 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1862 /// Print the recipe.
1863 void print(raw_ostream &O, const Twine &Indent,
1864 VPSlotTracker &SlotTracker) const override;
1865 #endif
1866
1867 /// Returns the scalar type of the induction.
getScalarType()1868 const Type *getScalarType() const {
1869 return getOperand(0)->getLiveInIRValue()->getType();
1870 }
1871
1872 /// Returns true if the recipe only uses the first lane of operand \p Op.
onlyFirstLaneUsed(const VPValue * Op)1873 bool onlyFirstLaneUsed(const VPValue *Op) const override {
1874 assert(is_contained(operands(), Op) &&
1875 "Op must be an operand of the recipe");
1876 return true;
1877 }
1878 };
1879
1880 /// A recipe for generating the active lane mask for the vector loop that is
1881 /// used to predicate the vector operations.
1882 /// TODO: It would be good to use the existing VPWidenPHIRecipe instead and
1883 /// remove VPActiveLaneMaskPHIRecipe.
1884 class VPActiveLaneMaskPHIRecipe : public VPHeaderPHIRecipe {
1885 DebugLoc DL;
1886
1887 public:
VPActiveLaneMaskPHIRecipe(VPValue * StartMask,DebugLoc DL)1888 VPActiveLaneMaskPHIRecipe(VPValue *StartMask, DebugLoc DL)
1889 : VPHeaderPHIRecipe(VPValue::VPVActiveLaneMaskPHISC,
1890 VPActiveLaneMaskPHISC, nullptr, StartMask),
1891 DL(DL) {}
1892
1893 ~VPActiveLaneMaskPHIRecipe() override = default;
1894
1895 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)1896 static inline bool classof(const VPDef *D) {
1897 return D->getVPDefID() == VPActiveLaneMaskPHISC;
1898 }
classof(const VPHeaderPHIRecipe * D)1899 static inline bool classof(const VPHeaderPHIRecipe *D) {
1900 return D->getVPDefID() == VPActiveLaneMaskPHISC;
1901 }
classof(const VPValue * V)1902 static inline bool classof(const VPValue *V) {
1903 return V->getVPValueID() == VPValue::VPVActiveLaneMaskPHISC;
1904 }
1905
1906 /// Generate the active lane mask phi of the vector loop.
1907 void execute(VPTransformState &State) override;
1908
1909 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1910 /// Print the recipe.
1911 void print(raw_ostream &O, const Twine &Indent,
1912 VPSlotTracker &SlotTracker) const override;
1913 #endif
1914 };
1915
1916 /// A Recipe for widening the canonical induction variable of the vector loop.
1917 class VPWidenCanonicalIVRecipe : public VPRecipeBase, public VPValue {
1918 public:
VPWidenCanonicalIVRecipe(VPCanonicalIVPHIRecipe * CanonicalIV)1919 VPWidenCanonicalIVRecipe(VPCanonicalIVPHIRecipe *CanonicalIV)
1920 : VPRecipeBase(VPWidenCanonicalIVSC, {CanonicalIV}),
1921 VPValue(VPValue::VPVWidenCanonicalIVSC, nullptr, this) {}
1922
1923 ~VPWidenCanonicalIVRecipe() override = default;
1924
1925 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)1926 static inline bool classof(const VPDef *D) {
1927 return D->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
1928 }
1929
1930 /// Extra classof implementations to allow directly casting from VPUser ->
1931 /// VPWidenCanonicalIVRecipe.
classof(const VPUser * U)1932 static inline bool classof(const VPUser *U) {
1933 auto *R = dyn_cast<VPRecipeBase>(U);
1934 return R && R->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
1935 }
classof(const VPRecipeBase * R)1936 static inline bool classof(const VPRecipeBase *R) {
1937 return R->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
1938 }
1939
1940 /// Generate a canonical vector induction variable of the vector loop, with
1941 /// start = {<Part*VF, Part*VF+1, ..., Part*VF+VF-1> for 0 <= Part < UF}, and
1942 /// step = <VF*UF, VF*UF, ..., VF*UF>.
1943 void execute(VPTransformState &State) override;
1944
1945 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1946 /// Print the recipe.
1947 void print(raw_ostream &O, const Twine &Indent,
1948 VPSlotTracker &SlotTracker) const override;
1949 #endif
1950
1951 /// Returns the scalar type of the induction.
getScalarType()1952 const Type *getScalarType() const {
1953 return cast<VPCanonicalIVPHIRecipe>(getOperand(0)->getDef())
1954 ->getScalarType();
1955 }
1956 };
1957
1958 /// A recipe for handling phi nodes of integer and floating-point inductions,
1959 /// producing their scalar values.
1960 class VPScalarIVStepsRecipe : public VPRecipeBase, public VPValue {
1961 /// Scalar type to use for the generated values.
1962 Type *Ty;
1963 /// If not nullptr, truncate the generated values to TruncToTy.
1964 Type *TruncToTy;
1965 const InductionDescriptor &IndDesc;
1966
1967 public:
VPScalarIVStepsRecipe(Type * Ty,const InductionDescriptor & IndDesc,VPValue * CanonicalIV,VPValue * Start,VPValue * Step,Type * TruncToTy)1968 VPScalarIVStepsRecipe(Type *Ty, const InductionDescriptor &IndDesc,
1969 VPValue *CanonicalIV, VPValue *Start, VPValue *Step,
1970 Type *TruncToTy)
1971 : VPRecipeBase(VPScalarIVStepsSC, {CanonicalIV, Start, Step}),
1972 VPValue(nullptr, this), Ty(Ty), TruncToTy(TruncToTy), IndDesc(IndDesc) {
1973 }
1974
1975 ~VPScalarIVStepsRecipe() override = default;
1976
1977 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)1978 static inline bool classof(const VPDef *D) {
1979 return D->getVPDefID() == VPRecipeBase::VPScalarIVStepsSC;
1980 }
1981 /// Extra classof implementations to allow directly casting from VPUser ->
1982 /// VPScalarIVStepsRecipe.
classof(const VPUser * U)1983 static inline bool classof(const VPUser *U) {
1984 auto *R = dyn_cast<VPRecipeBase>(U);
1985 return R && R->getVPDefID() == VPRecipeBase::VPScalarIVStepsSC;
1986 }
classof(const VPRecipeBase * R)1987 static inline bool classof(const VPRecipeBase *R) {
1988 return R->getVPDefID() == VPRecipeBase::VPScalarIVStepsSC;
1989 }
1990
1991 /// Generate the scalarized versions of the phi node as needed by their users.
1992 void execute(VPTransformState &State) override;
1993
1994 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1995 /// Print the recipe.
1996 void print(raw_ostream &O, const Twine &Indent,
1997 VPSlotTracker &SlotTracker) const override;
1998 #endif
1999
2000 /// Returns true if the induction is canonical, i.e. starting at 0 and
2001 /// incremented by UF * VF (= the original IV is incremented by 1).
2002 bool isCanonical() const;
2003
2004 VPCanonicalIVPHIRecipe *getCanonicalIV() const;
getStartValue()2005 VPValue *getStartValue() const { return getOperand(1); }
getStepValue()2006 VPValue *getStepValue() const { return getOperand(2); }
2007
2008 /// Returns true if the recipe only uses the first lane of operand \p Op.
onlyFirstLaneUsed(const VPValue * Op)2009 bool onlyFirstLaneUsed(const VPValue *Op) const override {
2010 assert(is_contained(operands(), Op) &&
2011 "Op must be an operand of the recipe");
2012 return true;
2013 }
2014 };
2015
2016 /// VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph. It
2017 /// holds a sequence of zero or more VPRecipe's each representing a sequence of
2018 /// output IR instructions. All PHI-like recipes must come before any non-PHI recipes.
2019 class VPBasicBlock : public VPBlockBase {
2020 public:
2021 using RecipeListTy = iplist<VPRecipeBase>;
2022
2023 private:
2024 /// The VPRecipes held in the order of output instructions to generate.
2025 RecipeListTy Recipes;
2026
2027 public:
2028 VPBasicBlock(const Twine &Name = "", VPRecipeBase *Recipe = nullptr)
2029 : VPBlockBase(VPBasicBlockSC, Name.str()) {
2030 if (Recipe)
2031 appendRecipe(Recipe);
2032 }
2033
~VPBasicBlock()2034 ~VPBasicBlock() override {
2035 while (!Recipes.empty())
2036 Recipes.pop_back();
2037 }
2038
2039 /// Instruction iterators...
2040 using iterator = RecipeListTy::iterator;
2041 using const_iterator = RecipeListTy::const_iterator;
2042 using reverse_iterator = RecipeListTy::reverse_iterator;
2043 using const_reverse_iterator = RecipeListTy::const_reverse_iterator;
2044
2045 //===--------------------------------------------------------------------===//
2046 /// Recipe iterator methods
2047 ///
begin()2048 inline iterator begin() { return Recipes.begin(); }
begin()2049 inline const_iterator begin() const { return Recipes.begin(); }
end()2050 inline iterator end() { return Recipes.end(); }
end()2051 inline const_iterator end() const { return Recipes.end(); }
2052
rbegin()2053 inline reverse_iterator rbegin() { return Recipes.rbegin(); }
rbegin()2054 inline const_reverse_iterator rbegin() const { return Recipes.rbegin(); }
rend()2055 inline reverse_iterator rend() { return Recipes.rend(); }
rend()2056 inline const_reverse_iterator rend() const { return Recipes.rend(); }
2057
size()2058 inline size_t size() const { return Recipes.size(); }
empty()2059 inline bool empty() const { return Recipes.empty(); }
front()2060 inline const VPRecipeBase &front() const { return Recipes.front(); }
front()2061 inline VPRecipeBase &front() { return Recipes.front(); }
back()2062 inline const VPRecipeBase &back() const { return Recipes.back(); }
back()2063 inline VPRecipeBase &back() { return Recipes.back(); }
2064
2065 /// Returns a reference to the list of recipes.
getRecipeList()2066 RecipeListTy &getRecipeList() { return Recipes; }
2067
2068 /// Returns a pointer to a member of the recipe list.
getSublistAccess(VPRecipeBase *)2069 static RecipeListTy VPBasicBlock::*getSublistAccess(VPRecipeBase *) {
2070 return &VPBasicBlock::Recipes;
2071 }
2072
2073 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPBlockBase * V)2074 static inline bool classof(const VPBlockBase *V) {
2075 return V->getVPBlockID() == VPBlockBase::VPBasicBlockSC;
2076 }
2077
insert(VPRecipeBase * Recipe,iterator InsertPt)2078 void insert(VPRecipeBase *Recipe, iterator InsertPt) {
2079 assert(Recipe && "No recipe to append.");
2080 assert(!Recipe->Parent && "Recipe already in VPlan");
2081 Recipe->Parent = this;
2082 Recipes.insert(InsertPt, Recipe);
2083 }
2084
2085 /// Augment the existing recipes of a VPBasicBlock with an additional
2086 /// \p Recipe as the last recipe.
appendRecipe(VPRecipeBase * Recipe)2087 void appendRecipe(VPRecipeBase *Recipe) { insert(Recipe, end()); }
2088
2089 /// The method which generates the output IR instructions that correspond to
2090 /// this VPBasicBlock, thereby "executing" the VPlan.
2091 void execute(struct VPTransformState *State) override;
2092
2093 /// Return the position of the first non-phi node recipe in the block.
2094 iterator getFirstNonPhi();
2095
2096 /// Returns an iterator range over the PHI-like recipes in the block.
phis()2097 iterator_range<iterator> phis() {
2098 return make_range(begin(), getFirstNonPhi());
2099 }
2100
2101 void dropAllReferences(VPValue *NewValue) override;
2102
2103 /// Split current block at \p SplitAt by inserting a new block between the
2104 /// current block and its successors and moving all recipes starting at
2105 /// SplitAt to the new block. Returns the new block.
2106 VPBasicBlock *splitAt(iterator SplitAt);
2107
2108 VPRegionBlock *getEnclosingLoopRegion();
2109
2110 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2111 /// Print this VPBsicBlock to \p O, prefixing all lines with \p Indent. \p
2112 /// SlotTracker is used to print unnamed VPValue's using consequtive numbers.
2113 ///
2114 /// Note that the numbering is applied to the whole VPlan, so printing
2115 /// individual blocks is consistent with the whole VPlan printing.
2116 void print(raw_ostream &O, const Twine &Indent,
2117 VPSlotTracker &SlotTracker) const override;
2118 using VPBlockBase::print; // Get the print(raw_stream &O) version.
2119 #endif
2120
2121 /// If the block has multiple successors, return the branch recipe terminating
2122 /// the block. If there are no or only a single successor, return nullptr;
2123 VPRecipeBase *getTerminator();
2124 const VPRecipeBase *getTerminator() const;
2125
2126 /// Returns true if the block is exiting it's parent region.
2127 bool isExiting() const;
2128
2129 private:
2130 /// Create an IR BasicBlock to hold the output instructions generated by this
2131 /// VPBasicBlock, and return it. Update the CFGState accordingly.
2132 BasicBlock *createEmptyBasicBlock(VPTransformState::CFGState &CFG);
2133 };
2134
2135 /// VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks
2136 /// which form a Single-Entry-Single-Exiting subgraph of the output IR CFG.
2137 /// A VPRegionBlock may indicate that its contents are to be replicated several
2138 /// times. This is designed to support predicated scalarization, in which a
2139 /// scalar if-then code structure needs to be generated VF * UF times. Having
2140 /// this replication indicator helps to keep a single model for multiple
2141 /// candidate VF's. The actual replication takes place only once the desired VF
2142 /// and UF have been determined.
2143 class VPRegionBlock : public VPBlockBase {
2144 /// Hold the Single Entry of the SESE region modelled by the VPRegionBlock.
2145 VPBlockBase *Entry;
2146
2147 /// Hold the Single Exiting block of the SESE region modelled by the
2148 /// VPRegionBlock.
2149 VPBlockBase *Exiting;
2150
2151 /// An indicator whether this region is to generate multiple replicated
2152 /// instances of output IR corresponding to its VPBlockBases.
2153 bool IsReplicator;
2154
2155 public:
2156 VPRegionBlock(VPBlockBase *Entry, VPBlockBase *Exiting,
2157 const std::string &Name = "", bool IsReplicator = false)
VPBlockBase(VPRegionBlockSC,Name)2158 : VPBlockBase(VPRegionBlockSC, Name), Entry(Entry), Exiting(Exiting),
2159 IsReplicator(IsReplicator) {
2160 assert(Entry->getPredecessors().empty() && "Entry block has predecessors.");
2161 assert(Exiting->getSuccessors().empty() && "Exit block has successors.");
2162 Entry->setParent(this);
2163 Exiting->setParent(this);
2164 }
2165 VPRegionBlock(const std::string &Name = "", bool IsReplicator = false)
VPBlockBase(VPRegionBlockSC,Name)2166 : VPBlockBase(VPRegionBlockSC, Name), Entry(nullptr), Exiting(nullptr),
2167 IsReplicator(IsReplicator) {}
2168
~VPRegionBlock()2169 ~VPRegionBlock() override {
2170 if (Entry) {
2171 VPValue DummyValue;
2172 Entry->dropAllReferences(&DummyValue);
2173 deleteCFG(Entry);
2174 }
2175 }
2176
2177 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPBlockBase * V)2178 static inline bool classof(const VPBlockBase *V) {
2179 return V->getVPBlockID() == VPBlockBase::VPRegionBlockSC;
2180 }
2181
getEntry()2182 const VPBlockBase *getEntry() const { return Entry; }
getEntry()2183 VPBlockBase *getEntry() { return Entry; }
2184
2185 /// Set \p EntryBlock as the entry VPBlockBase of this VPRegionBlock. \p
2186 /// EntryBlock must have no predecessors.
setEntry(VPBlockBase * EntryBlock)2187 void setEntry(VPBlockBase *EntryBlock) {
2188 assert(EntryBlock->getPredecessors().empty() &&
2189 "Entry block cannot have predecessors.");
2190 Entry = EntryBlock;
2191 EntryBlock->setParent(this);
2192 }
2193
2194 // FIXME: DominatorTreeBase is doing 'A->getParent()->front()'. 'front' is a
2195 // specific interface of llvm::Function, instead of using
2196 // GraphTraints::getEntryNode. We should add a new template parameter to
2197 // DominatorTreeBase representing the Graph type.
front()2198 VPBlockBase &front() const { return *Entry; }
2199
getExiting()2200 const VPBlockBase *getExiting() const { return Exiting; }
getExiting()2201 VPBlockBase *getExiting() { return Exiting; }
2202
2203 /// Set \p ExitingBlock as the exiting VPBlockBase of this VPRegionBlock. \p
2204 /// ExitingBlock must have no successors.
setExiting(VPBlockBase * ExitingBlock)2205 void setExiting(VPBlockBase *ExitingBlock) {
2206 assert(ExitingBlock->getSuccessors().empty() &&
2207 "Exit block cannot have successors.");
2208 Exiting = ExitingBlock;
2209 ExitingBlock->setParent(this);
2210 }
2211
2212 /// Returns the pre-header VPBasicBlock of the loop region.
getPreheaderVPBB()2213 VPBasicBlock *getPreheaderVPBB() {
2214 assert(!isReplicator() && "should only get pre-header of loop regions");
2215 return getSinglePredecessor()->getExitingBasicBlock();
2216 }
2217
2218 /// An indicator whether this region is to generate multiple replicated
2219 /// instances of output IR corresponding to its VPBlockBases.
isReplicator()2220 bool isReplicator() const { return IsReplicator; }
2221
2222 /// The method which generates the output IR instructions that correspond to
2223 /// this VPRegionBlock, thereby "executing" the VPlan.
2224 void execute(struct VPTransformState *State) override;
2225
2226 void dropAllReferences(VPValue *NewValue) override;
2227
2228 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2229 /// Print this VPRegionBlock to \p O (recursively), prefixing all lines with
2230 /// \p Indent. \p SlotTracker is used to print unnamed VPValue's using
2231 /// consequtive numbers.
2232 ///
2233 /// Note that the numbering is applied to the whole VPlan, so printing
2234 /// individual regions is consistent with the whole VPlan printing.
2235 void print(raw_ostream &O, const Twine &Indent,
2236 VPSlotTracker &SlotTracker) const override;
2237 using VPBlockBase::print; // Get the print(raw_stream &O) version.
2238 #endif
2239 };
2240
2241 //===----------------------------------------------------------------------===//
2242 // GraphTraits specializations for VPlan Hierarchical Control-Flow Graphs //
2243 //===----------------------------------------------------------------------===//
2244
2245 // The following set of template specializations implement GraphTraits to treat
2246 // any VPBlockBase as a node in a graph of VPBlockBases. It's important to note
// that VPBlockBase traits don't recurse into VPRegionBlocks, i.e., if the
2248 // VPBlockBase is a VPRegionBlock, this specialization provides access to its
2249 // successors/predecessors but not to the blocks inside the region.
2250
2251 template <> struct GraphTraits<VPBlockBase *> {
2252 using NodeRef = VPBlockBase *;
2253 using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;
2254
2255 static NodeRef getEntryNode(NodeRef N) { return N; }
2256
2257 static inline ChildIteratorType child_begin(NodeRef N) {
2258 return N->getSuccessors().begin();
2259 }
2260
2261 static inline ChildIteratorType child_end(NodeRef N) {
2262 return N->getSuccessors().end();
2263 }
2264 };
2265
2266 template <> struct GraphTraits<const VPBlockBase *> {
2267 using NodeRef = const VPBlockBase *;
2268 using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::const_iterator;
2269
2270 static NodeRef getEntryNode(NodeRef N) { return N; }
2271
2272 static inline ChildIteratorType child_begin(NodeRef N) {
2273 return N->getSuccessors().begin();
2274 }
2275
2276 static inline ChildIteratorType child_end(NodeRef N) {
2277 return N->getSuccessors().end();
2278 }
2279 };
2280
2281 // Inverse order specialization for VPBasicBlocks. Predecessors are used instead
2282 // of successors for the inverse traversal.
2283 template <> struct GraphTraits<Inverse<VPBlockBase *>> {
2284 using NodeRef = VPBlockBase *;
2285 using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;
2286
2287 static NodeRef getEntryNode(Inverse<NodeRef> B) { return B.Graph; }
2288
2289 static inline ChildIteratorType child_begin(NodeRef N) {
2290 return N->getPredecessors().begin();
2291 }
2292
2293 static inline ChildIteratorType child_end(NodeRef N) {
2294 return N->getPredecessors().end();
2295 }
2296 };
2297
2298 // The following set of template specializations implement GraphTraits to
2299 // treat VPRegionBlock as a graph and recurse inside its nodes. It's important
2300 // to note that the blocks inside the VPRegionBlock are treated as VPBlockBases
2301 // (i.e., no dyn_cast is performed, VPBlockBases specialization is used), so
// there won't be automatic recursion into other VPBlockBases that turn out to
2303 // VPRegionBlocks.
2304
2305 template <>
2306 struct GraphTraits<VPRegionBlock *> : public GraphTraits<VPBlockBase *> {
2307 using GraphRef = VPRegionBlock *;
2308 using nodes_iterator = df_iterator<NodeRef>;
2309
2310 static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }
2311
2312 static nodes_iterator nodes_begin(GraphRef N) {
2313 return nodes_iterator::begin(N->getEntry());
2314 }
2315
2316 static nodes_iterator nodes_end(GraphRef N) {
2317 // df_iterator::end() returns an empty iterator so the node used doesn't
2318 // matter.
2319 return nodes_iterator::end(N);
2320 }
2321 };
2322
2323 template <>
2324 struct GraphTraits<const VPRegionBlock *>
2325 : public GraphTraits<const VPBlockBase *> {
2326 using GraphRef = const VPRegionBlock *;
2327 using nodes_iterator = df_iterator<NodeRef>;
2328
2329 static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }
2330
2331 static nodes_iterator nodes_begin(GraphRef N) {
2332 return nodes_iterator::begin(N->getEntry());
2333 }
2334
2335 static nodes_iterator nodes_end(GraphRef N) {
2336 // df_iterator::end() returns an empty iterator so the node used doesn't
2337 // matter.
2338 return nodes_iterator::end(N);
2339 }
2340 };
2341
2342 template <>
2343 struct GraphTraits<Inverse<VPRegionBlock *>>
2344 : public GraphTraits<Inverse<VPBlockBase *>> {
2345 using GraphRef = VPRegionBlock *;
2346 using nodes_iterator = df_iterator<NodeRef>;
2347
2348 static NodeRef getEntryNode(Inverse<GraphRef> N) {
2349 return N.Graph->getExiting();
2350 }
2351
2352 static nodes_iterator nodes_begin(GraphRef N) {
2353 return nodes_iterator::begin(N->getExiting());
2354 }
2355
2356 static nodes_iterator nodes_end(GraphRef N) {
2357 // df_iterator::end() returns an empty iterator so the node used doesn't
2358 // matter.
2359 return nodes_iterator::end(N);
2360 }
2361 };
2362
2363 /// Iterator to traverse all successors of a VPBlockBase node. This includes the
2364 /// entry node of VPRegionBlocks. Exit blocks of a region implicitly have their
2365 /// parent region's successors. This ensures all blocks in a region are visited
2366 /// before any blocks in a successor region when doing a reverse post-order
/// traversal of the graph.
2368 template <typename BlockPtrTy>
2369 class VPAllSuccessorsIterator
2370 : public iterator_facade_base<VPAllSuccessorsIterator<BlockPtrTy>,
2371 std::forward_iterator_tag, VPBlockBase> {
2372 BlockPtrTy Block;
2373 /// Index of the current successor. For VPBasicBlock nodes, this simply is the
2374 /// index for the successor array. For VPRegionBlock, SuccessorIdx == 0 is
2375 /// used for the region's entry block, and SuccessorIdx - 1 are the indices
2376 /// for the successor array.
2377 size_t SuccessorIdx;
2378
2379 static BlockPtrTy getBlockWithSuccs(BlockPtrTy Current) {
2380 while (Current && Current->getNumSuccessors() == 0)
2381 Current = Current->getParent();
2382 return Current;
2383 }
2384
2385 /// Templated helper to dereference successor \p SuccIdx of \p Block. Used by
2386 /// both the const and non-const operator* implementations.
2387 template <typename T1> static T1 deref(T1 Block, unsigned SuccIdx) {
2388 if (auto *R = dyn_cast<VPRegionBlock>(Block)) {
2389 if (SuccIdx == 0)
2390 return R->getEntry();
2391 SuccIdx--;
2392 }
2393
2394 // For exit blocks, use the next parent region with successors.
2395 return getBlockWithSuccs(Block)->getSuccessors()[SuccIdx];
2396 }
2397
2398 public:
2399 VPAllSuccessorsIterator(BlockPtrTy Block, size_t Idx = 0)
2400 : Block(Block), SuccessorIdx(Idx) {}
2401 VPAllSuccessorsIterator(const VPAllSuccessorsIterator &Other)
2402 : Block(Other.Block), SuccessorIdx(Other.SuccessorIdx) {}
2403
2404 VPAllSuccessorsIterator &operator=(const VPAllSuccessorsIterator &R) {
2405 Block = R.Block;
2406 SuccessorIdx = R.SuccessorIdx;
2407 return *this;
2408 }
2409
2410 static VPAllSuccessorsIterator end(BlockPtrTy Block) {
2411 BlockPtrTy ParentWithSuccs = getBlockWithSuccs(Block);
2412 unsigned NumSuccessors = ParentWithSuccs
2413 ? ParentWithSuccs->getNumSuccessors()
2414 : Block->getNumSuccessors();
2415
2416 if (auto *R = dyn_cast<VPRegionBlock>(Block))
2417 return {R, NumSuccessors + 1};
2418 return {Block, NumSuccessors};
2419 }
2420
2421 bool operator==(const VPAllSuccessorsIterator &R) const {
2422 return Block == R.Block && SuccessorIdx == R.SuccessorIdx;
2423 }
2424
2425 const VPBlockBase *operator*() const { return deref(Block, SuccessorIdx); }
2426
2427 BlockPtrTy operator*() { return deref(Block, SuccessorIdx); }
2428
2429 VPAllSuccessorsIterator &operator++() {
2430 SuccessorIdx++;
2431 return *this;
2432 }
2433
2434 VPAllSuccessorsIterator operator++(int X) {
2435 VPAllSuccessorsIterator Orig = *this;
2436 SuccessorIdx++;
2437 return Orig;
2438 }
2439 };
2440
2441 /// Helper for GraphTraits specialization that traverses through VPRegionBlocks.
template <typename BlockTy> class VPBlockRecursiveTraversalWrapper {
  /// Block at which the recursive traversal begins.
  BlockTy EntryBlock;

public:
  VPBlockRecursiveTraversalWrapper(BlockTy EntryBlock)
      : EntryBlock(EntryBlock) {}

  /// Returns the traversal's entry block.
  BlockTy getEntry() { return EntryBlock; }
};
2449
2450 /// GraphTraits specialization to recursively traverse VPBlockBase nodes,
2451 /// including traversing through VPRegionBlocks. Exit blocks of a region
2452 /// implicitly have their parent region's successors. This ensures all blocks in
2453 /// a region are visited before any blocks in a successor region when doing a
2454 /// reverse post-order traversal of the graph.
2455 template <>
2456 struct GraphTraits<VPBlockRecursiveTraversalWrapper<VPBlockBase *>> {
2457 using NodeRef = VPBlockBase *;
2458 using ChildIteratorType = VPAllSuccessorsIterator<VPBlockBase *>;
2459
2460 static NodeRef
2461 getEntryNode(VPBlockRecursiveTraversalWrapper<VPBlockBase *> N) {
2462 return N.getEntry();
2463 }
2464
2465 static inline ChildIteratorType child_begin(NodeRef N) {
2466 return ChildIteratorType(N);
2467 }
2468
2469 static inline ChildIteratorType child_end(NodeRef N) {
2470 return ChildIteratorType::end(N);
2471 }
2472 };
2473
2474 template <>
2475 struct GraphTraits<VPBlockRecursiveTraversalWrapper<const VPBlockBase *>> {
2476 using NodeRef = const VPBlockBase *;
2477 using ChildIteratorType = VPAllSuccessorsIterator<const VPBlockBase *>;
2478
2479 static NodeRef
2480 getEntryNode(VPBlockRecursiveTraversalWrapper<const VPBlockBase *> N) {
2481 return N.getEntry();
2482 }
2483
2484 static inline ChildIteratorType child_begin(NodeRef N) {
2485 return ChildIteratorType(N);
2486 }
2487
2488 static inline ChildIteratorType child_end(NodeRef N) {
2489 return ChildIteratorType::end(N);
2490 }
2491 };
2492
/// VPlan models a candidate for vectorization, encoding various decisions taken
2494 /// to produce efficient output IR, including which branches, basic-blocks and
2495 /// output IR instructions to generate, and their cost. VPlan holds a
2496 /// Hierarchical-CFG of VPBasicBlocks and VPRegionBlocks rooted at an Entry
2497 /// VPBlock.
class VPlan {
  friend class VPlanPrinter;
  friend class VPSlotTracker;

  /// Hold the single entry to the Hierarchical CFG of the VPlan.
  VPBlockBase *Entry;

  /// Holds the VFs applicable to this VPlan.
  SmallSetVector<ElementCount, 2> VFs;

  /// Holds the name of the VPlan, for printing.
  std::string Name;

  /// Holds all the external definitions created for this VPlan. External
  /// definitions must be immutable and hold a pointer to their underlying IR.
  DenseMap<Value *, VPValue *> VPExternalDefs;

  /// Represents the trip count of the original loop, for folding
  /// the tail.
  VPValue *TripCount = nullptr;

  /// Represents the backedge taken count of the original loop, for folding
  /// the tail. It equals TripCount - 1.
  VPValue *BackedgeTakenCount = nullptr;

  /// Represents the vector trip count.
  VPValue VectorTripCount;

  /// Holds a mapping between Values and their corresponding VPValue inside
  /// VPlan.
  Value2VPValueTy Value2VPValue;

  /// Contains all VPValues that have been allocated by addVPValue directly and
  /// need to be freed when the plan's destructor is called.
  SmallVector<VPValue *, 16> VPValuesToFree;

  /// Indicates whether it is safe to use the Value2VPValue mapping or if the
  /// mapping cannot be used any longer, because it is stale.
  bool Value2VPValueEnabled = true;

  /// Values used outside the plan.
  MapVector<PHINode *, VPLiveOut *> LiveOuts;

public:
  /// Construct a plan rooted at \p Entry (may be null); registers this plan
  /// with the entry block when one is given.
  VPlan(VPBlockBase *Entry = nullptr) : Entry(Entry) {
    if (Entry)
      Entry->setPlan(this);
  }

  /// Releases everything the plan owns: live-outs, the block hierarchy,
  /// directly-allocated VPValues, trip counts and external definitions.
  ~VPlan() {
    clearLiveOuts();

    if (Entry) {
      // Detach all operand references before deleting blocks so that no
      // recipe is destroyed while another recipe still uses it.
      VPValue DummyValue;
      for (VPBlockBase *Block : depth_first(Entry))
        Block->dropAllReferences(&DummyValue);

      VPBlockBase::deleteCFG(Entry);
    }
    for (VPValue *VPV : VPValuesToFree)
      delete VPV;
    if (TripCount)
      delete TripCount;
    if (BackedgeTakenCount)
      delete BackedgeTakenCount;
    for (auto &P : VPExternalDefs)
      delete P.second;
  }

  /// Prepare the plan for execution, setting up the required live-in values.
  void prepareToExecute(Value *TripCount, Value *VectorTripCount,
                        Value *CanonicalIVStartValue, VPTransformState &State,
                        bool IsEpilogueVectorization);

  /// Generate the IR code for this VPlan.
  void execute(struct VPTransformState *State);

  VPBlockBase *getEntry() { return Entry; }
  const VPBlockBase *getEntry() const { return Entry; }

  /// Set \p Block as the new entry of this plan and register the plan with it.
  /// Returns the new entry.
  VPBlockBase *setEntry(VPBlockBase *Block) {
    Entry = Block;
    Block->setPlan(this);
    return Entry;
  }

  /// The trip count of the original loop. Lazily created on first use.
  VPValue *getOrCreateTripCount() {
    if (!TripCount)
      TripCount = new VPValue();
    return TripCount;
  }

  /// The backedge taken count of the original loop. Lazily created on first
  /// use.
  VPValue *getOrCreateBackedgeTakenCount() {
    if (!BackedgeTakenCount)
      BackedgeTakenCount = new VPValue();
    return BackedgeTakenCount;
  }

  /// The vector trip count.
  VPValue &getVectorTripCount() { return VectorTripCount; }

  /// Mark the plan to indicate that using Value2VPValue is not safe any
  /// longer, because it may be stale.
  void disableValue2VPValue() { Value2VPValueEnabled = false; }

  /// Record \p VF as a vectorization factor applicable to this plan.
  void addVF(ElementCount VF) { VFs.insert(VF); }

  /// Returns true if \p VF was recorded as applicable to this plan.
  bool hasVF(ElementCount VF) { return VFs.count(VF); }

  const std::string &getName() const { return Name; }

  void setName(const Twine &newName) { Name = newName.str(); }

  /// Get the existing or add a new external definition for \p V.
  VPValue *getOrAddExternalDef(Value *V) {
    auto I = VPExternalDefs.insert({V, nullptr});
    if (I.second)
      I.first->second = new VPValue(V);
    return I.first->second;
  }

  /// Create a new VPValue for \p V and record it in the mapping. The plan
  /// owns the new VPValue and frees it in the destructor.
  void addVPValue(Value *V) {
    assert(Value2VPValueEnabled &&
           "IR value to VPValue mapping may be out of date!");
    assert(V && "Trying to add a null Value to VPlan");
    assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
    VPValue *VPV = new VPValue(V);
    Value2VPValue[V] = VPV;
    VPValuesToFree.push_back(VPV);
  }

  /// Record \p VPV as the VPValue for \p V. Unlike the single-argument
  /// overload, ownership of \p VPV stays with the caller.
  void addVPValue(Value *V, VPValue *VPV) {
    assert(Value2VPValueEnabled && "Value2VPValue mapping may be out of date!");
    assert(V && "Trying to add a null Value to VPlan");
    assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
    Value2VPValue[V] = VPV;
  }

  /// Returns the VPValue for \p V. \p OverrideAllowed can be used to disable
  /// checking whether it is safe to query VPValues using IR Values.
  VPValue *getVPValue(Value *V, bool OverrideAllowed = false) {
    assert((OverrideAllowed || isa<Constant>(V) || Value2VPValueEnabled) &&
           "Value2VPValue mapping may be out of date!");
    assert(V && "Trying to get the VPValue of a null Value");
    assert(Value2VPValue.count(V) && "Value does not exist in VPlan");
    return Value2VPValue[V];
  }

  /// Gets the VPValue or adds a new one (if none exists yet) for \p V. \p
  /// OverrideAllowed can be used to disable checking whether it is safe to
  /// query VPValues using IR Values.
  VPValue *getOrAddVPValue(Value *V, bool OverrideAllowed = false) {
    assert((OverrideAllowed || isa<Constant>(V) || Value2VPValueEnabled) &&
           "Value2VPValue mapping may be out of date!");
    assert(V && "Trying to get or add the VPValue of a null Value");
    if (!Value2VPValue.count(V))
      addVPValue(V);
    return getVPValue(V);
  }

  /// Drop the mapping entry for \p V. Does not free the mapped VPValue; it is
  /// either caller-owned or freed via VPValuesToFree in the destructor.
  void removeVPValueFor(Value *V) {
    assert(Value2VPValueEnabled &&
           "IR value to VPValue mapping may be out of date!");
    Value2VPValue.erase(V);
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print this VPlan to \p O.
  void print(raw_ostream &O) const;

  /// Print this VPlan in DOT format to \p O.
  void printDOT(raw_ostream &O) const;

  /// Dump the plan to stderr (for debugging).
  LLVM_DUMP_METHOD void dump() const;
#endif

  /// Returns a range mapping the values the range \p Operands to their
  /// corresponding VPValues.
  iterator_range<mapped_iterator<Use *, std::function<VPValue *(Value *)>>>
  mapToVPValues(User::op_range Operands) {
    std::function<VPValue *(Value *)> Fn = [this](Value *Op) {
      return getOrAddVPValue(Op);
    };
    return map_range(Operands, Fn);
  }

  /// Returns true if \p VPV is uniform after vectorization, i.e. it has no
  /// defining recipe, or is defined by a uniform VPReplicateRecipe.
  bool isUniformAfterVectorization(VPValue *VPV) const {
    auto RepR = dyn_cast_or_null<VPReplicateRecipe>(VPV->getDef());
    return !VPV->getDef() || (RepR && RepR->isUniform());
  }

  /// Returns the VPRegionBlock of the vector loop.
  VPRegionBlock *getVectorLoopRegion() {
    return cast<VPRegionBlock>(getEntry()->getSingleSuccessor());
  }
  const VPRegionBlock *getVectorLoopRegion() const {
    return cast<VPRegionBlock>(getEntry()->getSingleSuccessor());
  }

  /// Returns the canonical induction recipe of the vector loop.
  VPCanonicalIVPHIRecipe *getCanonicalIV() {
    VPBasicBlock *EntryVPBB = getVectorLoopRegion()->getEntryBasicBlock();
    if (EntryVPBB->empty()) {
      // VPlan native path.
      EntryVPBB = cast<VPBasicBlock>(EntryVPBB->getSingleSuccessor());
    }
    return cast<VPCanonicalIVPHIRecipe>(&*EntryVPBB->begin());
  }

  /// Find and return the VPActiveLaneMaskPHIRecipe from the header - there
  /// can be only one at most. If there isn't one, then return nullptr.
  VPActiveLaneMaskPHIRecipe *getActiveLaneMaskPhi();

  /// Record \p V as the live-out value for phi \p PN outside the plan.
  void addLiveOut(PHINode *PN, VPValue *V);

  /// Free and remove all recorded live-outs.
  void clearLiveOuts() {
    for (auto &KV : LiveOuts)
      delete KV.second;
    LiveOuts.clear();
  }

  /// Free and remove the live-out recorded for phi \p PN.
  void removeLiveOut(PHINode *PN) {
    delete LiveOuts[PN];
    LiveOuts.erase(PN);
  }

  const MapVector<PHINode *, VPLiveOut *> &getLiveOuts() const {
    return LiveOuts;
  }

private:
  /// Add to the given dominator tree the header block and every new basic block
  /// that was created between it and the latch block, inclusive.
  static void updateDominatorTree(DominatorTree *DT, BasicBlock *LoopLatchBB,
                                  BasicBlock *LoopPreHeaderBB,
                                  BasicBlock *LoopExitBB);
};
2739
2740 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2741 /// VPlanPrinter prints a given VPlan to a given output stream. The printing is
2742 /// indented and follows the dot format.
2743 class VPlanPrinter {
2744 raw_ostream &OS;
2745 const VPlan &Plan;
2746 unsigned Depth = 0;
2747 unsigned TabWidth = 2;
2748 std::string Indent;
2749 unsigned BID = 0;
2750 SmallDenseMap<const VPBlockBase *, unsigned> BlockID;
2751
2752 VPSlotTracker SlotTracker;
2753
2754 /// Handle indentation.
2755 void bumpIndent(int b) { Indent = std::string((Depth += b) * TabWidth, ' '); }
2756
2757 /// Print a given \p Block of the Plan.
2758 void dumpBlock(const VPBlockBase *Block);
2759
2760 /// Print the information related to the CFG edges going out of a given
2761 /// \p Block, followed by printing the successor blocks themselves.
2762 void dumpEdges(const VPBlockBase *Block);
2763
2764 /// Print a given \p BasicBlock, including its VPRecipes, followed by printing
2765 /// its successor blocks.
2766 void dumpBasicBlock(const VPBasicBlock *BasicBlock);
2767
2768 /// Print a given \p Region of the Plan.
2769 void dumpRegion(const VPRegionBlock *Region);
2770
2771 unsigned getOrCreateBID(const VPBlockBase *Block) {
2772 return BlockID.count(Block) ? BlockID[Block] : BlockID[Block] = BID++;
2773 }
2774
2775 Twine getOrCreateName(const VPBlockBase *Block);
2776
2777 Twine getUID(const VPBlockBase *Block);
2778
2779 /// Print the information related to a CFG edge between two VPBlockBases.
2780 void drawEdge(const VPBlockBase *From, const VPBlockBase *To, bool Hidden,
2781 const Twine &Label);
2782
2783 public:
2784 VPlanPrinter(raw_ostream &O, const VPlan &P)
2785 : OS(O), Plan(P), SlotTracker(&P) {}
2786
2787 LLVM_DUMP_METHOD void dump();
2788 };
2789
/// Wrapper to print an IR Value in VPlan-ingredient form via its print method.
struct VPlanIngredient {
  // Wrapped IR value; non-owning.
  const Value *V;

  VPlanIngredient(const Value *V) : V(V) {}

  void print(raw_ostream &O) const;
};

/// Stream a VPlanIngredient by delegating to its print method.
inline raw_ostream &operator<<(raw_ostream &OS, const VPlanIngredient &I) {
  I.print(OS);
  return OS;
}

/// Stream a whole VPlan in plain-text form.
inline raw_ostream &operator<<(raw_ostream &OS, const VPlan &Plan) {
  Plan.print(OS);
  return OS;
}
2807 #endif
2808
2809 //===----------------------------------------------------------------------===//
2810 // VPlan Utilities
2811 //===----------------------------------------------------------------------===//
2812
2813 /// Class that provides utilities for VPBlockBases in VPlan.
2814 class VPBlockUtils {
2815 public:
2816 VPBlockUtils() = delete;
2817
2818 /// Insert disconnected VPBlockBase \p NewBlock after \p BlockPtr. Add \p
2819 /// NewBlock as successor of \p BlockPtr and \p BlockPtr as predecessor of \p
2820 /// NewBlock, and propagate \p BlockPtr parent to \p NewBlock. \p BlockPtr's
2821 /// successors are moved from \p BlockPtr to \p NewBlock. \p NewBlock must
2822 /// have neither successors nor predecessors.
2823 static void insertBlockAfter(VPBlockBase *NewBlock, VPBlockBase *BlockPtr) {
2824 assert(NewBlock->getSuccessors().empty() &&
2825 NewBlock->getPredecessors().empty() &&
2826 "Can't insert new block with predecessors or successors.");
2827 NewBlock->setParent(BlockPtr->getParent());
2828 SmallVector<VPBlockBase *> Succs(BlockPtr->successors());
2829 for (VPBlockBase *Succ : Succs) {
2830 disconnectBlocks(BlockPtr, Succ);
2831 connectBlocks(NewBlock, Succ);
2832 }
2833 connectBlocks(BlockPtr, NewBlock);
2834 }
2835
2836 /// Insert disconnected VPBlockBases \p IfTrue and \p IfFalse after \p
2837 /// BlockPtr. Add \p IfTrue and \p IfFalse as succesors of \p BlockPtr and \p
2838 /// BlockPtr as predecessor of \p IfTrue and \p IfFalse. Propagate \p BlockPtr
2839 /// parent to \p IfTrue and \p IfFalse. \p BlockPtr must have no successors
2840 /// and \p IfTrue and \p IfFalse must have neither successors nor
2841 /// predecessors.
2842 static void insertTwoBlocksAfter(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
2843 VPBlockBase *BlockPtr) {
2844 assert(IfTrue->getSuccessors().empty() &&
2845 "Can't insert IfTrue with successors.");
2846 assert(IfFalse->getSuccessors().empty() &&
2847 "Can't insert IfFalse with successors.");
2848 BlockPtr->setTwoSuccessors(IfTrue, IfFalse);
2849 IfTrue->setPredecessors({BlockPtr});
2850 IfFalse->setPredecessors({BlockPtr});
2851 IfTrue->setParent(BlockPtr->getParent());
2852 IfFalse->setParent(BlockPtr->getParent());
2853 }
2854
2855 /// Connect VPBlockBases \p From and \p To bi-directionally. Append \p To to
2856 /// the successors of \p From and \p From to the predecessors of \p To. Both
2857 /// VPBlockBases must have the same parent, which can be null. Both
2858 /// VPBlockBases can be already connected to other VPBlockBases.
2859 static void connectBlocks(VPBlockBase *From, VPBlockBase *To) {
2860 assert((From->getParent() == To->getParent()) &&
2861 "Can't connect two block with different parents");
2862 assert(From->getNumSuccessors() < 2 &&
2863 "Blocks can't have more than two successors.");
2864 From->appendSuccessor(To);
2865 To->appendPredecessor(From);
2866 }
2867
2868 /// Disconnect VPBlockBases \p From and \p To bi-directionally. Remove \p To
2869 /// from the successors of \p From and \p From from the predecessors of \p To.
2870 static void disconnectBlocks(VPBlockBase *From, VPBlockBase *To) {
2871 assert(To && "Successor to disconnect is null.");
2872 From->removeSuccessor(To);
2873 To->removePredecessor(From);
2874 }
2875
2876 /// Try to merge \p Block into its single predecessor, if \p Block is a
2877 /// VPBasicBlock and its predecessor has a single successor. Returns a pointer
2878 /// to the predecessor \p Block was merged into or nullptr otherwise.
2879 static VPBasicBlock *tryToMergeBlockIntoPredecessor(VPBlockBase *Block) {
2880 auto *VPBB = dyn_cast<VPBasicBlock>(Block);
2881 auto *PredVPBB =
2882 dyn_cast_or_null<VPBasicBlock>(Block->getSinglePredecessor());
2883 if (!VPBB || !PredVPBB || PredVPBB->getNumSuccessors() != 1)
2884 return nullptr;
2885
2886 for (VPRecipeBase &R : make_early_inc_range(*VPBB))
2887 R.moveBefore(*PredVPBB, PredVPBB->end());
2888 VPBlockUtils::disconnectBlocks(PredVPBB, VPBB);
2889 auto *ParentRegion = cast<VPRegionBlock>(Block->getParent());
2890 if (ParentRegion->getExiting() == Block)
2891 ParentRegion->setExiting(PredVPBB);
2892 SmallVector<VPBlockBase *> Successors(Block->successors());
2893 for (auto *Succ : Successors) {
2894 VPBlockUtils::disconnectBlocks(Block, Succ);
2895 VPBlockUtils::connectBlocks(PredVPBB, Succ);
2896 }
2897 delete Block;
2898 return PredVPBB;
2899 }
2900
2901 /// Return an iterator range over \p Range which only includes \p BlockTy
2902 /// blocks. The accesses are casted to \p BlockTy.
2903 template <typename BlockTy, typename T>
2904 static auto blocksOnly(const T &Range) {
2905 // Create BaseTy with correct const-ness based on BlockTy.
2906 using BaseTy =
2907 typename std::conditional<std::is_const<BlockTy>::value,
2908 const VPBlockBase, VPBlockBase>::type;
2909
2910 // We need to first create an iterator range over (const) BlocktTy & instead
2911 // of (const) BlockTy * for filter_range to work properly.
2912 auto Mapped =
2913 map_range(Range, [](BaseTy *Block) -> BaseTy & { return *Block; });
2914 auto Filter = make_filter_range(
2915 Mapped, [](BaseTy &Block) { return isa<BlockTy>(&Block); });
2916 return map_range(Filter, [](BaseTy &Block) -> BlockTy * {
2917 return cast<BlockTy>(&Block);
2918 });
2919 }
2920 };
2921
class VPInterleavedAccessInfo {
  // Maps each VPInstruction to the interleave group it belongs to. Several
  // instructions may map to the same group.
  DenseMap<VPInstruction *, InterleaveGroup<VPInstruction> *>
      InterleaveGroupMap;

  /// Type for mapping of instruction based interleave groups to VPInstruction
  /// interleave groups
  using Old2NewTy = DenseMap<InterleaveGroup<Instruction> *,
                             InterleaveGroup<VPInstruction> *>;

  /// Recursively traverse \p Region and populate VPlan based interleave groups
  /// based on \p IAI.
  void visitRegion(VPRegionBlock *Region, Old2NewTy &Old2New,
                   InterleavedAccessInfo &IAI);
  /// Recursively traverse \p Block and populate VPlan based interleave groups
  /// based on \p IAI.
  void visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
                  InterleavedAccessInfo &IAI);

public:
  VPInterleavedAccessInfo(VPlan &Plan, InterleavedAccessInfo &IAI);

  /// Frees each interleave group exactly once, even though multiple map
  /// entries may point to the same group.
  ~VPInterleavedAccessInfo() {
    SmallPtrSet<InterleaveGroup<VPInstruction> *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if doesn't have such group.
  InterleaveGroup<VPInstruction> *
  getInterleaveGroup(VPInstruction *Instr) const {
    return InterleaveGroupMap.lookup(Instr);
  }
};
2960
2961 /// Class that maps (parts of) an existing VPlan to trees of combined
2962 /// VPInstructions.
class VPlanSlp {
  // Classification of an operand bundle while matching: failed to combine,
  // all loads, or a common opcode.
  enum class OpMode { Failed, Load, Opcode };

  /// A DenseMapInfo implementation for using SmallVector<VPValue *, 4> as
  /// DenseMap keys.
  struct BundleDenseMapInfo {
    static SmallVector<VPValue *, 4> getEmptyKey() {
      return {reinterpret_cast<VPValue *>(-1)};
    }

    static SmallVector<VPValue *, 4> getTombstoneKey() {
      return {reinterpret_cast<VPValue *>(-2)};
    }

    static unsigned getHashValue(const SmallVector<VPValue *, 4> &V) {
      return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
    }

    static bool isEqual(const SmallVector<VPValue *, 4> &LHS,
                        const SmallVector<VPValue *, 4> &RHS) {
      return LHS == RHS;
    }
  };

  /// Mapping of values in the original VPlan to a combined VPInstruction.
  DenseMap<SmallVector<VPValue *, 4>, VPInstruction *, BundleDenseMapInfo>
      BundleToCombined;

  // Interleave-group information used when scoring candidate bundles.
  VPInterleavedAccessInfo &IAI;

  /// Basic block to operate on. For now, only instructions in a single BB are
  /// considered.
  const VPBasicBlock &BB;

  /// Indicates whether we managed to combine all visited instructions or not.
  bool CompletelySLP = true;

  /// Width of the widest combined bundle in bits.
  unsigned WidestBundleBits = 0;

  using MultiNodeOpTy =
      typename std::pair<VPInstruction *, SmallVector<VPValue *, 4>>;

  // Input operand bundles for the current multi node. Each multi node operand
  // bundle contains values not matching the multi node's opcode. They will
  // be reordered in reorderMultiNodeOps, once we completed building a
  // multi node.
  SmallVector<MultiNodeOpTy, 4> MultiNodeOps;

  /// Indicates whether we are building a multi node currently.
  bool MultiNodeActive = false;

  /// Check if we can vectorize Operands together.
  bool areVectorizable(ArrayRef<VPValue *> Operands) const;

  /// Add combined instruction \p New for the bundle \p Operands.
  void addCombined(ArrayRef<VPValue *> Operands, VPInstruction *New);

  /// Indicate we hit a bundle we failed to combine. Returns nullptr for now.
  VPInstruction *markFailed();

  /// Reorder operands in the multi node to maximize sequential memory access
  /// and commutative operations.
  SmallVector<MultiNodeOpTy, 4> reorderMultiNodeOps();

  /// Choose the best candidate to use for the lane after \p Last. The set of
  /// candidates to choose from are values with an opcode matching \p Last's
  /// or loads consecutive to \p Last.
  std::pair<OpMode, VPValue *> getBest(OpMode Mode, VPValue *Last,
                                       SmallPtrSetImpl<VPValue *> &Candidates,
                                       VPInterleavedAccessInfo &IAI);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print bundle \p Values to dbgs().
  void dumpBundle(ArrayRef<VPValue *> Values);
#endif

public:
  VPlanSlp(VPInterleavedAccessInfo &IAI, VPBasicBlock &BB) : IAI(IAI), BB(BB) {}

  ~VPlanSlp() = default;

  /// Tries to build an SLP tree rooted at \p Operands and returns a
  /// VPInstruction combining \p Operands, if they can be combined.
  VPInstruction *buildGraph(ArrayRef<VPValue *> Operands);

  /// Return the width of the widest combined bundle in bits.
  unsigned getWidestBundleBits() const { return WidestBundleBits; }

  /// Return true if all visited instruction can be combined.
  bool isCompletelySLP() const { return CompletelySLP; }
};
3055
3056 namespace vputils {
3057
3058 /// Returns true if only the first lane of \p Def is used.
3059 bool onlyFirstLaneUsed(VPValue *Def);
3060
3061 /// Get or create a VPValue that corresponds to the expansion of \p Expr. If \p
3062 /// Expr is a SCEVConstant or SCEVUnknown, return a VPValue wrapping the live-in
3063 /// value. Otherwise return a VPExpandSCEVRecipe to expand \p Expr. If \p Plan's
3064 /// pre-header already contains a recipe expanding \p Expr, return it. If not,
3065 /// create a new one.
3066 VPValue *getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr,
3067 ScalarEvolution &SE);
3068 } // end namespace vputils
3069
3070 } // end namespace llvm
3071
3072 #endif // LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
3073