1 //===-- SystemZISelDAGToDAG.cpp - A dag to dag inst selector for SystemZ --===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines an instruction selector for the SystemZ target.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "SystemZTargetMachine.h"
14 #include "SystemZISelLowering.h"
15 #include "llvm/Analysis/AliasAnalysis.h"
16 #include "llvm/CodeGen/SelectionDAGISel.h"
17 #include "llvm/Support/Debug.h"
18 #include "llvm/Support/KnownBits.h"
19 #include "llvm/Support/raw_ostream.h"
20
21 using namespace llvm;
22
23 #define DEBUG_TYPE "systemz-isel"
24
25 namespace {
26 // Used to build addressing modes.
27 struct SystemZAddressingMode {
28 // The shape of the address.
29 enum AddrForm {
30 // base+displacement
31 FormBD,
32
33 // base+displacement+index for load and store operands
34 FormBDXNormal,
35
36 // base+displacement+index for load address operands
37 FormBDXLA,
38
39 // base+displacement+index+ADJDYNALLOC
40 FormBDXDynAlloc
41 };
42 AddrForm Form;
43
44 // The type of displacement. The enum names here correspond directly
45 // to the definitions in SystemZOperand.td. We could split them into
46 // flags -- single/pair, 128-bit, etc. -- but it hardly seems worth it.
47 enum DispRange {
48 Disp12Only,
49 Disp12Pair,
50 Disp20Only,
51 Disp20Only128,
52 Disp20Pair
53 };
54 DispRange DR;
55
56 // The parts of the address. The address is equivalent to:
57 //
58 // Base + Disp + Index + (IncludesDynAlloc ? ADJDYNALLOC : 0)
59 SDValue Base;
60 int64_t Disp;
61 SDValue Index;
62 bool IncludesDynAlloc;
63
SystemZAddressingMode__anon6e09402b0111::SystemZAddressingMode64 SystemZAddressingMode(AddrForm form, DispRange dr)
65 : Form(form), DR(dr), Disp(0), IncludesDynAlloc(false) {}
66
67 // True if the address can have an index register.
hasIndexField__anon6e09402b0111::SystemZAddressingMode68 bool hasIndexField() { return Form != FormBD; }
69
70 // True if the address can (and must) include ADJDYNALLOC.
isDynAlloc__anon6e09402b0111::SystemZAddressingMode71 bool isDynAlloc() { return Form == FormBDXDynAlloc; }
72
dump__anon6e09402b0111::SystemZAddressingMode73 void dump(const llvm::SelectionDAG *DAG) {
74 errs() << "SystemZAddressingMode " << this << '\n';
75
76 errs() << " Base ";
77 if (Base.getNode())
78 Base.getNode()->dump(DAG);
79 else
80 errs() << "null\n";
81
82 if (hasIndexField()) {
83 errs() << " Index ";
84 if (Index.getNode())
85 Index.getNode()->dump(DAG);
86 else
87 errs() << "null\n";
88 }
89
90 errs() << " Disp " << Disp;
91 if (IncludesDynAlloc)
92 errs() << " + ADJDYNALLOC";
93 errs() << '\n';
94 }
95 };
96
97 // Return a mask with Count low bits set.
allOnes(unsigned int Count)98 static uint64_t allOnes(unsigned int Count) {
99 assert(Count <= 64);
100 if (Count > 63)
101 return UINT64_MAX;
102 return (uint64_t(1) << Count) - 1;
103 }
104
105 // Represents operands 2 to 5 of the ROTATE AND ... SELECTED BITS operation
106 // given by Opcode. The operands are: Input (R2), Start (I3), End (I4) and
107 // Rotate (I5). The combined operand value is effectively:
108 //
109 // (or (rotl Input, Rotate), ~Mask)
110 //
111 // for RNSBG and:
112 //
113 // (and (rotl Input, Rotate), Mask)
114 //
115 // otherwise. The output value has BitSize bits, although Input may be
116 // narrower (in which case the upper bits are don't care), or wider (in which
117 // case the result will be truncated as part of the operation).
118 struct RxSBGOperands {
RxSBGOperands__anon6e09402b0111::RxSBGOperands119 RxSBGOperands(unsigned Op, SDValue N)
120 : Opcode(Op), BitSize(N.getValueSizeInBits()),
121 Mask(allOnes(BitSize)), Input(N), Start(64 - BitSize), End(63),
122 Rotate(0) {}
123
124 unsigned Opcode;
125 unsigned BitSize;
126 uint64_t Mask;
127 SDValue Input;
128 unsigned Start;
129 unsigned End;
130 unsigned Rotate;
131 };
132
133 class SystemZDAGToDAGISel : public SelectionDAGISel {
134 const SystemZSubtarget *Subtarget;
135
136 // Used by SystemZOperands.td to create integer constants.
getImm(const SDNode * Node,uint64_t Imm) const137 inline SDValue getImm(const SDNode *Node, uint64_t Imm) const {
138 return CurDAG->getTargetConstant(Imm, SDLoc(Node), Node->getValueType(0));
139 }
140
getTargetMachine() const141 const SystemZTargetMachine &getTargetMachine() const {
142 return static_cast<const SystemZTargetMachine &>(TM);
143 }
144
getInstrInfo() const145 const SystemZInstrInfo *getInstrInfo() const {
146 return Subtarget->getInstrInfo();
147 }
148
149 // Try to fold more of the base or index of AM into AM, where IsBase
150 // selects between the base and index.
151 bool expandAddress(SystemZAddressingMode &AM, bool IsBase) const;
152
153 // Try to describe N in AM, returning true on success.
154 bool selectAddress(SDValue N, SystemZAddressingMode &AM) const;
155
156 // Extract individual target operands from matched address AM.
157 void getAddressOperands(const SystemZAddressingMode &AM, EVT VT,
158 SDValue &Base, SDValue &Disp) const;
159 void getAddressOperands(const SystemZAddressingMode &AM, EVT VT,
160 SDValue &Base, SDValue &Disp, SDValue &Index) const;
161
162 // Try to match Addr as a FormBD address with displacement type DR.
163 // Return true on success, storing the base and displacement in
164 // Base and Disp respectively.
165 bool selectBDAddr(SystemZAddressingMode::DispRange DR, SDValue Addr,
166 SDValue &Base, SDValue &Disp) const;
167
168 // Try to match Addr as a FormBDX address with displacement type DR.
169 // Return true on success and if the result had no index. Store the
170 // base and displacement in Base and Disp respectively.
171 bool selectMVIAddr(SystemZAddressingMode::DispRange DR, SDValue Addr,
172 SDValue &Base, SDValue &Disp) const;
173
174 // Try to match Addr as a FormBDX* address of form Form with
175 // displacement type DR. Return true on success, storing the base,
176 // displacement and index in Base, Disp and Index respectively.
177 bool selectBDXAddr(SystemZAddressingMode::AddrForm Form,
178 SystemZAddressingMode::DispRange DR, SDValue Addr,
179 SDValue &Base, SDValue &Disp, SDValue &Index) const;
180
181 // PC-relative address matching routines used by SystemZOperands.td.
selectPCRelAddress(SDValue Addr,SDValue & Target) const182 bool selectPCRelAddress(SDValue Addr, SDValue &Target) const {
183 if (SystemZISD::isPCREL(Addr.getOpcode())) {
184 Target = Addr.getOperand(0);
185 return true;
186 }
187 return false;
188 }
189
190 // BD matching routines used by SystemZOperands.td.
selectBDAddr12Only(SDValue Addr,SDValue & Base,SDValue & Disp) const191 bool selectBDAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp) const {
192 return selectBDAddr(SystemZAddressingMode::Disp12Only, Addr, Base, Disp);
193 }
selectBDAddr12Pair(SDValue Addr,SDValue & Base,SDValue & Disp) const194 bool selectBDAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
195 return selectBDAddr(SystemZAddressingMode::Disp12Pair, Addr, Base, Disp);
196 }
selectBDAddr20Only(SDValue Addr,SDValue & Base,SDValue & Disp) const197 bool selectBDAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp) const {
198 return selectBDAddr(SystemZAddressingMode::Disp20Only, Addr, Base, Disp);
199 }
selectBDAddr20Pair(SDValue Addr,SDValue & Base,SDValue & Disp) const200 bool selectBDAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
201 return selectBDAddr(SystemZAddressingMode::Disp20Pair, Addr, Base, Disp);
202 }
203
204 // MVI matching routines used by SystemZOperands.td.
selectMVIAddr12Pair(SDValue Addr,SDValue & Base,SDValue & Disp) const205 bool selectMVIAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
206 return selectMVIAddr(SystemZAddressingMode::Disp12Pair, Addr, Base, Disp);
207 }
selectMVIAddr20Pair(SDValue Addr,SDValue & Base,SDValue & Disp) const208 bool selectMVIAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
209 return selectMVIAddr(SystemZAddressingMode::Disp20Pair, Addr, Base, Disp);
210 }
211
212 // BDX matching routines used by SystemZOperands.td.
selectBDXAddr12Only(SDValue Addr,SDValue & Base,SDValue & Disp,SDValue & Index) const213 bool selectBDXAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp,
214 SDValue &Index) const {
215 return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
216 SystemZAddressingMode::Disp12Only,
217 Addr, Base, Disp, Index);
218 }
selectBDXAddr12Pair(SDValue Addr,SDValue & Base,SDValue & Disp,SDValue & Index) const219 bool selectBDXAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
220 SDValue &Index) const {
221 return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
222 SystemZAddressingMode::Disp12Pair,
223 Addr, Base, Disp, Index);
224 }
selectDynAlloc12Only(SDValue Addr,SDValue & Base,SDValue & Disp,SDValue & Index) const225 bool selectDynAlloc12Only(SDValue Addr, SDValue &Base, SDValue &Disp,
226 SDValue &Index) const {
227 return selectBDXAddr(SystemZAddressingMode::FormBDXDynAlloc,
228 SystemZAddressingMode::Disp12Only,
229 Addr, Base, Disp, Index);
230 }
selectBDXAddr20Only(SDValue Addr,SDValue & Base,SDValue & Disp,SDValue & Index) const231 bool selectBDXAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp,
232 SDValue &Index) const {
233 return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
234 SystemZAddressingMode::Disp20Only,
235 Addr, Base, Disp, Index);
236 }
selectBDXAddr20Only128(SDValue Addr,SDValue & Base,SDValue & Disp,SDValue & Index) const237 bool selectBDXAddr20Only128(SDValue Addr, SDValue &Base, SDValue &Disp,
238 SDValue &Index) const {
239 return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
240 SystemZAddressingMode::Disp20Only128,
241 Addr, Base, Disp, Index);
242 }
selectBDXAddr20Pair(SDValue Addr,SDValue & Base,SDValue & Disp,SDValue & Index) const243 bool selectBDXAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
244 SDValue &Index) const {
245 return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
246 SystemZAddressingMode::Disp20Pair,
247 Addr, Base, Disp, Index);
248 }
selectLAAddr12Pair(SDValue Addr,SDValue & Base,SDValue & Disp,SDValue & Index) const249 bool selectLAAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
250 SDValue &Index) const {
251 return selectBDXAddr(SystemZAddressingMode::FormBDXLA,
252 SystemZAddressingMode::Disp12Pair,
253 Addr, Base, Disp, Index);
254 }
selectLAAddr20Pair(SDValue Addr,SDValue & Base,SDValue & Disp,SDValue & Index) const255 bool selectLAAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
256 SDValue &Index) const {
257 return selectBDXAddr(SystemZAddressingMode::FormBDXLA,
258 SystemZAddressingMode::Disp20Pair,
259 Addr, Base, Disp, Index);
260 }
261
262 // Try to match Addr as an address with a base, 12-bit displacement
263 // and index, where the index is element Elem of a vector.
264 // Return true on success, storing the base, displacement and vector
265 // in Base, Disp and Index respectively.
266 bool selectBDVAddr12Only(SDValue Addr, SDValue Elem, SDValue &Base,
267 SDValue &Disp, SDValue &Index) const;
268
269 // Check whether (or Op (and X InsertMask)) is effectively an insertion
270 // of X into bits InsertMask of some Y != Op. Return true if so and
271 // set Op to that Y.
272 bool detectOrAndInsertion(SDValue &Op, uint64_t InsertMask) const;
273
274 // Try to update RxSBG so that only the bits of RxSBG.Input in Mask are used.
275 // Return true on success.
276 bool refineRxSBGMask(RxSBGOperands &RxSBG, uint64_t Mask) const;
277
278 // Try to fold some of RxSBG.Input into other fields of RxSBG.
279 // Return true on success.
280 bool expandRxSBG(RxSBGOperands &RxSBG) const;
281
282 // Return an undefined value of type VT.
283 SDValue getUNDEF(const SDLoc &DL, EVT VT) const;
284
285 // Convert N to VT, if it isn't already.
286 SDValue convertTo(const SDLoc &DL, EVT VT, SDValue N) const;
287
288 // Try to implement AND or shift node N using RISBG with the zero flag set.
289 // Return the selected node on success, otherwise return null.
290 bool tryRISBGZero(SDNode *N);
291
292 // Try to use RISBG or Opcode to implement OR or XOR node N.
293 // Return the selected node on success, otherwise return null.
294 bool tryRxSBG(SDNode *N, unsigned Opcode);
295
296 // If Op0 is null, then Node is a constant that can be loaded using:
297 //
298 // (Opcode UpperVal LowerVal)
299 //
300 // If Op0 is nonnull, then Node can be implemented using:
301 //
302 // (Opcode (Opcode Op0 UpperVal) LowerVal)
303 void splitLargeImmediate(unsigned Opcode, SDNode *Node, SDValue Op0,
304 uint64_t UpperVal, uint64_t LowerVal);
305
306 void loadVectorConstant(const SystemZVectorConstantInfo &VCI,
307 SDNode *Node);
308
309 // Try to use gather instruction Opcode to implement vector insertion N.
310 bool tryGather(SDNode *N, unsigned Opcode);
311
312 // Try to use scatter instruction Opcode to implement store Store.
313 bool tryScatter(StoreSDNode *Store, unsigned Opcode);
314
315 // Change a chain of {load; op; store} of the same value into a simple op
316 // through memory of that value, if the uses of the modified value and its
317 // address are suitable.
318 bool tryFoldLoadStoreIntoMemOperand(SDNode *Node);
319
320 // Return true if Load and Store are loads and stores of the same size
321 // and are guaranteed not to overlap. Such operations can be implemented
322 // using block (SS-format) instructions.
323 //
324 // Partial overlap would lead to incorrect code, since the block operations
325 // are logically bytewise, even though they have a fast path for the
326 // non-overlapping case. We also need to avoid full overlap (i.e. two
327 // addresses that might be equal at run time) because although that case
328 // would be handled correctly, it might be implemented by millicode.
329 bool canUseBlockOperation(StoreSDNode *Store, LoadSDNode *Load) const;
330
331 // N is a (store (load Y), X) pattern. Return true if it can use an MVC
332 // from Y to X.
333 bool storeLoadCanUseMVC(SDNode *N) const;
334
335 // N is a (store (op (load A[0]), (load A[1])), X) pattern. Return true
336 // if A[1 - I] == X and if N can use a block operation like NC from A[I]
337 // to X.
338 bool storeLoadCanUseBlockBinary(SDNode *N, unsigned I) const;
339
340 // Return true if N (a load or a store) fullfills the alignment
341 // requirements for a PC-relative access.
342 bool storeLoadIsAligned(SDNode *N) const;
343
344 // Try to expand a boolean SELECT_CCMASK using an IPM sequence.
345 SDValue expandSelectBoolean(SDNode *Node);
346
347 public:
SystemZDAGToDAGISel(SystemZTargetMachine & TM,CodeGenOpt::Level OptLevel)348 SystemZDAGToDAGISel(SystemZTargetMachine &TM, CodeGenOpt::Level OptLevel)
349 : SelectionDAGISel(TM, OptLevel) {}
350
runOnMachineFunction(MachineFunction & MF)351 bool runOnMachineFunction(MachineFunction &MF) override {
352 const Function &F = MF.getFunction();
353 if (F.getFnAttribute("fentry-call").getValueAsString() != "true") {
354 if (F.hasFnAttribute("mnop-mcount"))
355 report_fatal_error("mnop-mcount only supported with fentry-call");
356 if (F.hasFnAttribute("mrecord-mcount"))
357 report_fatal_error("mrecord-mcount only supported with fentry-call");
358 }
359
360 Subtarget = &MF.getSubtarget<SystemZSubtarget>();
361 return SelectionDAGISel::runOnMachineFunction(MF);
362 }
363
364 // Override MachineFunctionPass.
getPassName() const365 StringRef getPassName() const override {
366 return "SystemZ DAG->DAG Pattern Instruction Selection";
367 }
368
369 // Override SelectionDAGISel.
370 void Select(SDNode *Node) override;
371 bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
372 std::vector<SDValue> &OutOps) override;
373 bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;
374 void PreprocessISelDAG() override;
375
376 // Include the pieces autogenerated from the target description.
377 #include "SystemZGenDAGISel.inc"
378 };
379 } // end anonymous namespace
380
createSystemZISelDag(SystemZTargetMachine & TM,CodeGenOpt::Level OptLevel)381 FunctionPass *llvm::createSystemZISelDag(SystemZTargetMachine &TM,
382 CodeGenOpt::Level OptLevel) {
383 return new SystemZDAGToDAGISel(TM, OptLevel);
384 }
385
386 // Return true if Val should be selected as a displacement for an address
387 // with range DR. Here we're interested in the range of both the instruction
388 // described by DR and of any pairing instruction.
selectDisp(SystemZAddressingMode::DispRange DR,int64_t Val)389 static bool selectDisp(SystemZAddressingMode::DispRange DR, int64_t Val) {
390 switch (DR) {
391 case SystemZAddressingMode::Disp12Only:
392 return isUInt<12>(Val);
393
394 case SystemZAddressingMode::Disp12Pair:
395 case SystemZAddressingMode::Disp20Only:
396 case SystemZAddressingMode::Disp20Pair:
397 return isInt<20>(Val);
398
399 case SystemZAddressingMode::Disp20Only128:
400 return isInt<20>(Val) && isInt<20>(Val + 8);
401 }
402 llvm_unreachable("Unhandled displacement range");
403 }
404
405 // Change the base or index in AM to Value, where IsBase selects
406 // between the base and index.
changeComponent(SystemZAddressingMode & AM,bool IsBase,SDValue Value)407 static void changeComponent(SystemZAddressingMode &AM, bool IsBase,
408 SDValue Value) {
409 if (IsBase)
410 AM.Base = Value;
411 else
412 AM.Index = Value;
413 }
414
415 // The base or index of AM is equivalent to Value + ADJDYNALLOC,
416 // where IsBase selects between the base and index. Try to fold the
417 // ADJDYNALLOC into AM.
expandAdjDynAlloc(SystemZAddressingMode & AM,bool IsBase,SDValue Value)418 static bool expandAdjDynAlloc(SystemZAddressingMode &AM, bool IsBase,
419 SDValue Value) {
420 if (AM.isDynAlloc() && !AM.IncludesDynAlloc) {
421 changeComponent(AM, IsBase, Value);
422 AM.IncludesDynAlloc = true;
423 return true;
424 }
425 return false;
426 }
427
428 // The base of AM is equivalent to Base + Index. Try to use Index as
429 // the index register.
expandIndex(SystemZAddressingMode & AM,SDValue Base,SDValue Index)430 static bool expandIndex(SystemZAddressingMode &AM, SDValue Base,
431 SDValue Index) {
432 if (AM.hasIndexField() && !AM.Index.getNode()) {
433 AM.Base = Base;
434 AM.Index = Index;
435 return true;
436 }
437 return false;
438 }
439
440 // The base or index of AM is equivalent to Op0 + Op1, where IsBase selects
441 // between the base and index. Try to fold Op1 into AM's displacement.
expandDisp(SystemZAddressingMode & AM,bool IsBase,SDValue Op0,uint64_t Op1)442 static bool expandDisp(SystemZAddressingMode &AM, bool IsBase,
443 SDValue Op0, uint64_t Op1) {
444 // First try adjusting the displacement.
445 int64_t TestDisp = AM.Disp + Op1;
446 if (selectDisp(AM.DR, TestDisp)) {
447 changeComponent(AM, IsBase, Op0);
448 AM.Disp = TestDisp;
449 return true;
450 }
451
452 // We could consider forcing the displacement into a register and
453 // using it as an index, but it would need to be carefully tuned.
454 return false;
455 }
456
expandAddress(SystemZAddressingMode & AM,bool IsBase) const457 bool SystemZDAGToDAGISel::expandAddress(SystemZAddressingMode &AM,
458 bool IsBase) const {
459 SDValue N = IsBase ? AM.Base : AM.Index;
460 unsigned Opcode = N.getOpcode();
461 if (Opcode == ISD::TRUNCATE) {
462 N = N.getOperand(0);
463 Opcode = N.getOpcode();
464 }
465 if (Opcode == ISD::ADD || CurDAG->isBaseWithConstantOffset(N)) {
466 SDValue Op0 = N.getOperand(0);
467 SDValue Op1 = N.getOperand(1);
468
469 unsigned Op0Code = Op0->getOpcode();
470 unsigned Op1Code = Op1->getOpcode();
471
472 if (Op0Code == SystemZISD::ADJDYNALLOC)
473 return expandAdjDynAlloc(AM, IsBase, Op1);
474 if (Op1Code == SystemZISD::ADJDYNALLOC)
475 return expandAdjDynAlloc(AM, IsBase, Op0);
476
477 if (Op0Code == ISD::Constant)
478 return expandDisp(AM, IsBase, Op1,
479 cast<ConstantSDNode>(Op0)->getSExtValue());
480 if (Op1Code == ISD::Constant)
481 return expandDisp(AM, IsBase, Op0,
482 cast<ConstantSDNode>(Op1)->getSExtValue());
483
484 if (IsBase && expandIndex(AM, Op0, Op1))
485 return true;
486 }
487 if (Opcode == SystemZISD::PCREL_OFFSET) {
488 SDValue Full = N.getOperand(0);
489 SDValue Base = N.getOperand(1);
490 SDValue Anchor = Base.getOperand(0);
491 uint64_t Offset = (cast<GlobalAddressSDNode>(Full)->getOffset() -
492 cast<GlobalAddressSDNode>(Anchor)->getOffset());
493 return expandDisp(AM, IsBase, Base, Offset);
494 }
495 return false;
496 }
497
498 // Return true if an instruction with displacement range DR should be
499 // used for displacement value Val. selectDisp(DR, Val) must already hold.
isValidDisp(SystemZAddressingMode::DispRange DR,int64_t Val)500 static bool isValidDisp(SystemZAddressingMode::DispRange DR, int64_t Val) {
501 assert(selectDisp(DR, Val) && "Invalid displacement");
502 switch (DR) {
503 case SystemZAddressingMode::Disp12Only:
504 case SystemZAddressingMode::Disp20Only:
505 case SystemZAddressingMode::Disp20Only128:
506 return true;
507
508 case SystemZAddressingMode::Disp12Pair:
509 // Use the other instruction if the displacement is too large.
510 return isUInt<12>(Val);
511
512 case SystemZAddressingMode::Disp20Pair:
513 // Use the other instruction if the displacement is small enough.
514 return !isUInt<12>(Val);
515 }
516 llvm_unreachable("Unhandled displacement range");
517 }
518
519 // Return true if Base + Disp + Index should be performed by LA(Y).
shouldUseLA(SDNode * Base,int64_t Disp,SDNode * Index)520 static bool shouldUseLA(SDNode *Base, int64_t Disp, SDNode *Index) {
521 // Don't use LA(Y) for constants.
522 if (!Base)
523 return false;
524
525 // Always use LA(Y) for frame addresses, since we know that the destination
526 // register is almost always (perhaps always) going to be different from
527 // the frame register.
528 if (Base->getOpcode() == ISD::FrameIndex)
529 return true;
530
531 if (Disp) {
532 // Always use LA(Y) if there is a base, displacement and index.
533 if (Index)
534 return true;
535
536 // Always use LA if the displacement is small enough. It should always
537 // be no worse than AGHI (and better if it avoids a move).
538 if (isUInt<12>(Disp))
539 return true;
540
541 // For similar reasons, always use LAY if the constant is too big for AGHI.
542 // LAY should be no worse than AGFI.
543 if (!isInt<16>(Disp))
544 return true;
545 } else {
546 // Don't use LA for plain registers.
547 if (!Index)
548 return false;
549
550 // Don't use LA for plain addition if the index operand is only used
551 // once. It should be a natural two-operand addition in that case.
552 if (Index->hasOneUse())
553 return false;
554
555 // Prefer addition if the second operation is sign-extended, in the
556 // hope of using AGF.
557 unsigned IndexOpcode = Index->getOpcode();
558 if (IndexOpcode == ISD::SIGN_EXTEND ||
559 IndexOpcode == ISD::SIGN_EXTEND_INREG)
560 return false;
561 }
562
563 // Don't use LA for two-operand addition if either operand is only
564 // used once. The addition instructions are better in that case.
565 if (Base->hasOneUse())
566 return false;
567
568 return true;
569 }
570
571 // Return true if Addr is suitable for AM, updating AM if so.
selectAddress(SDValue Addr,SystemZAddressingMode & AM) const572 bool SystemZDAGToDAGISel::selectAddress(SDValue Addr,
573 SystemZAddressingMode &AM) const {
574 // Start out assuming that the address will need to be loaded separately,
575 // then try to extend it as much as we can.
576 AM.Base = Addr;
577
578 // First try treating the address as a constant.
579 if (Addr.getOpcode() == ISD::Constant &&
580 expandDisp(AM, true, SDValue(),
581 cast<ConstantSDNode>(Addr)->getSExtValue()))
582 ;
583 // Also see if it's a bare ADJDYNALLOC.
584 else if (Addr.getOpcode() == SystemZISD::ADJDYNALLOC &&
585 expandAdjDynAlloc(AM, true, SDValue()))
586 ;
587 else
588 // Otherwise try expanding each component.
589 while (expandAddress(AM, true) ||
590 (AM.Index.getNode() && expandAddress(AM, false)))
591 continue;
592
593 // Reject cases where it isn't profitable to use LA(Y).
594 if (AM.Form == SystemZAddressingMode::FormBDXLA &&
595 !shouldUseLA(AM.Base.getNode(), AM.Disp, AM.Index.getNode()))
596 return false;
597
598 // Reject cases where the other instruction in a pair should be used.
599 if (!isValidDisp(AM.DR, AM.Disp))
600 return false;
601
602 // Make sure that ADJDYNALLOC is included where necessary.
603 if (AM.isDynAlloc() && !AM.IncludesDynAlloc)
604 return false;
605
606 LLVM_DEBUG(AM.dump(CurDAG));
607 return true;
608 }
609
610 // Insert a node into the DAG at least before Pos. This will reposition
611 // the node as needed, and will assign it a node ID that is <= Pos's ID.
612 // Note that this does *not* preserve the uniqueness of node IDs!
613 // The selection DAG must no longer depend on their uniqueness when this
614 // function is used.
insertDAGNode(SelectionDAG * DAG,SDNode * Pos,SDValue N)615 static void insertDAGNode(SelectionDAG *DAG, SDNode *Pos, SDValue N) {
616 if (N->getNodeId() == -1 ||
617 (SelectionDAGISel::getUninvalidatedNodeId(N.getNode()) >
618 SelectionDAGISel::getUninvalidatedNodeId(Pos))) {
619 DAG->RepositionNode(Pos->getIterator(), N.getNode());
620 // Mark Node as invalid for pruning as after this it may be a successor to a
621 // selected node but otherwise be in the same position of Pos.
622 // Conservatively mark it with the same -abs(Id) to assure node id
623 // invariant is preserved.
624 N->setNodeId(Pos->getNodeId());
625 SelectionDAGISel::InvalidateNodeId(N.getNode());
626 }
627 }
628
getAddressOperands(const SystemZAddressingMode & AM,EVT VT,SDValue & Base,SDValue & Disp) const629 void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
630 EVT VT, SDValue &Base,
631 SDValue &Disp) const {
632 Base = AM.Base;
633 if (!Base.getNode())
634 // Register 0 means "no base". This is mostly useful for shifts.
635 Base = CurDAG->getRegister(0, VT);
636 else if (Base.getOpcode() == ISD::FrameIndex) {
637 // Lower a FrameIndex to a TargetFrameIndex.
638 int64_t FrameIndex = cast<FrameIndexSDNode>(Base)->getIndex();
639 Base = CurDAG->getTargetFrameIndex(FrameIndex, VT);
640 } else if (Base.getValueType() != VT) {
641 // Truncate values from i64 to i32, for shifts.
642 assert(VT == MVT::i32 && Base.getValueType() == MVT::i64 &&
643 "Unexpected truncation");
644 SDLoc DL(Base);
645 SDValue Trunc = CurDAG->getNode(ISD::TRUNCATE, DL, VT, Base);
646 insertDAGNode(CurDAG, Base.getNode(), Trunc);
647 Base = Trunc;
648 }
649
650 // Lower the displacement to a TargetConstant.
651 Disp = CurDAG->getTargetConstant(AM.Disp, SDLoc(Base), VT);
652 }
653
getAddressOperands(const SystemZAddressingMode & AM,EVT VT,SDValue & Base,SDValue & Disp,SDValue & Index) const654 void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
655 EVT VT, SDValue &Base,
656 SDValue &Disp,
657 SDValue &Index) const {
658 getAddressOperands(AM, VT, Base, Disp);
659
660 Index = AM.Index;
661 if (!Index.getNode())
662 // Register 0 means "no index".
663 Index = CurDAG->getRegister(0, VT);
664 }
665
selectBDAddr(SystemZAddressingMode::DispRange DR,SDValue Addr,SDValue & Base,SDValue & Disp) const666 bool SystemZDAGToDAGISel::selectBDAddr(SystemZAddressingMode::DispRange DR,
667 SDValue Addr, SDValue &Base,
668 SDValue &Disp) const {
669 SystemZAddressingMode AM(SystemZAddressingMode::FormBD, DR);
670 if (!selectAddress(Addr, AM))
671 return false;
672
673 getAddressOperands(AM, Addr.getValueType(), Base, Disp);
674 return true;
675 }
676
selectMVIAddr(SystemZAddressingMode::DispRange DR,SDValue Addr,SDValue & Base,SDValue & Disp) const677 bool SystemZDAGToDAGISel::selectMVIAddr(SystemZAddressingMode::DispRange DR,
678 SDValue Addr, SDValue &Base,
679 SDValue &Disp) const {
680 SystemZAddressingMode AM(SystemZAddressingMode::FormBDXNormal, DR);
681 if (!selectAddress(Addr, AM) || AM.Index.getNode())
682 return false;
683
684 getAddressOperands(AM, Addr.getValueType(), Base, Disp);
685 return true;
686 }
687
selectBDXAddr(SystemZAddressingMode::AddrForm Form,SystemZAddressingMode::DispRange DR,SDValue Addr,SDValue & Base,SDValue & Disp,SDValue & Index) const688 bool SystemZDAGToDAGISel::selectBDXAddr(SystemZAddressingMode::AddrForm Form,
689 SystemZAddressingMode::DispRange DR,
690 SDValue Addr, SDValue &Base,
691 SDValue &Disp, SDValue &Index) const {
692 SystemZAddressingMode AM(Form, DR);
693 if (!selectAddress(Addr, AM))
694 return false;
695
696 getAddressOperands(AM, Addr.getValueType(), Base, Disp, Index);
697 return true;
698 }
699
selectBDVAddr12Only(SDValue Addr,SDValue Elem,SDValue & Base,SDValue & Disp,SDValue & Index) const700 bool SystemZDAGToDAGISel::selectBDVAddr12Only(SDValue Addr, SDValue Elem,
701 SDValue &Base,
702 SDValue &Disp,
703 SDValue &Index) const {
704 SDValue Regs[2];
705 if (selectBDXAddr12Only(Addr, Regs[0], Disp, Regs[1]) &&
706 Regs[0].getNode() && Regs[1].getNode()) {
707 for (unsigned int I = 0; I < 2; ++I) {
708 Base = Regs[I];
709 Index = Regs[1 - I];
710 // We can't tell here whether the index vector has the right type
711 // for the access; the caller needs to do that instead.
712 if (Index.getOpcode() == ISD::ZERO_EXTEND)
713 Index = Index.getOperand(0);
714 if (Index.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
715 Index.getOperand(1) == Elem) {
716 Index = Index.getOperand(0);
717 return true;
718 }
719 }
720 }
721 return false;
722 }
723
detectOrAndInsertion(SDValue & Op,uint64_t InsertMask) const724 bool SystemZDAGToDAGISel::detectOrAndInsertion(SDValue &Op,
725 uint64_t InsertMask) const {
726 // We're only interested in cases where the insertion is into some operand
727 // of Op, rather than into Op itself. The only useful case is an AND.
728 if (Op.getOpcode() != ISD::AND)
729 return false;
730
731 // We need a constant mask.
732 auto *MaskNode = dyn_cast<ConstantSDNode>(Op.getOperand(1).getNode());
733 if (!MaskNode)
734 return false;
735
736 // It's not an insertion of Op.getOperand(0) if the two masks overlap.
737 uint64_t AndMask = MaskNode->getZExtValue();
738 if (InsertMask & AndMask)
739 return false;
740
741 // It's only an insertion if all bits are covered or are known to be zero.
742 // The inner check covers all cases but is more expensive.
743 uint64_t Used = allOnes(Op.getValueSizeInBits());
744 if (Used != (AndMask | InsertMask)) {
745 KnownBits Known = CurDAG->computeKnownBits(Op.getOperand(0));
746 if (Used != (AndMask | InsertMask | Known.Zero.getZExtValue()))
747 return false;
748 }
749
750 Op = Op.getOperand(0);
751 return true;
752 }
753
refineRxSBGMask(RxSBGOperands & RxSBG,uint64_t Mask) const754 bool SystemZDAGToDAGISel::refineRxSBGMask(RxSBGOperands &RxSBG,
755 uint64_t Mask) const {
756 const SystemZInstrInfo *TII = getInstrInfo();
757 if (RxSBG.Rotate != 0)
758 Mask = (Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate));
759 Mask &= RxSBG.Mask;
760 if (TII->isRxSBGMask(Mask, RxSBG.BitSize, RxSBG.Start, RxSBG.End)) {
761 RxSBG.Mask = Mask;
762 return true;
763 }
764 return false;
765 }
766
767 // Return true if any bits of (RxSBG.Input & Mask) are significant.
maskMatters(RxSBGOperands & RxSBG,uint64_t Mask)768 static bool maskMatters(RxSBGOperands &RxSBG, uint64_t Mask) {
769 // Rotate the mask in the same way as RxSBG.Input is rotated.
770 if (RxSBG.Rotate != 0)
771 Mask = ((Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate)));
772 return (Mask & RxSBG.Mask) != 0;
773 }
774
expandRxSBG(RxSBGOperands & RxSBG) const775 bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
776 SDValue N = RxSBG.Input;
777 unsigned Opcode = N.getOpcode();
778 switch (Opcode) {
779 case ISD::TRUNCATE: {
780 if (RxSBG.Opcode == SystemZ::RNSBG)
781 return false;
782 uint64_t BitSize = N.getValueSizeInBits();
783 uint64_t Mask = allOnes(BitSize);
784 if (!refineRxSBGMask(RxSBG, Mask))
785 return false;
786 RxSBG.Input = N.getOperand(0);
787 return true;
788 }
789 case ISD::AND: {
790 if (RxSBG.Opcode == SystemZ::RNSBG)
791 return false;
792
793 auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
794 if (!MaskNode)
795 return false;
796
797 SDValue Input = N.getOperand(0);
798 uint64_t Mask = MaskNode->getZExtValue();
799 if (!refineRxSBGMask(RxSBG, Mask)) {
800 // If some bits of Input are already known zeros, those bits will have
801 // been removed from the mask. See if adding them back in makes the
802 // mask suitable.
803 KnownBits Known = CurDAG->computeKnownBits(Input);
804 Mask |= Known.Zero.getZExtValue();
805 if (!refineRxSBGMask(RxSBG, Mask))
806 return false;
807 }
808 RxSBG.Input = Input;
809 return true;
810 }
811
812 case ISD::OR: {
813 if (RxSBG.Opcode != SystemZ::RNSBG)
814 return false;
815
816 auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
817 if (!MaskNode)
818 return false;
819
820 SDValue Input = N.getOperand(0);
821 uint64_t Mask = ~MaskNode->getZExtValue();
822 if (!refineRxSBGMask(RxSBG, Mask)) {
823 // If some bits of Input are already known ones, those bits will have
824 // been removed from the mask. See if adding them back in makes the
825 // mask suitable.
826 KnownBits Known = CurDAG->computeKnownBits(Input);
827 Mask &= ~Known.One.getZExtValue();
828 if (!refineRxSBGMask(RxSBG, Mask))
829 return false;
830 }
831 RxSBG.Input = Input;
832 return true;
833 }
834
835 case ISD::ROTL: {
836 // Any 64-bit rotate left can be merged into the RxSBG.
837 if (RxSBG.BitSize != 64 || N.getValueType() != MVT::i64)
838 return false;
839 auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
840 if (!CountNode)
841 return false;
842
843 RxSBG.Rotate = (RxSBG.Rotate + CountNode->getZExtValue()) & 63;
844 RxSBG.Input = N.getOperand(0);
845 return true;
846 }
847
848 case ISD::ANY_EXTEND:
849 // Bits above the extended operand are don't-care.
850 RxSBG.Input = N.getOperand(0);
851 return true;
852
853 case ISD::ZERO_EXTEND:
854 if (RxSBG.Opcode != SystemZ::RNSBG) {
855 // Restrict the mask to the extended operand.
856 unsigned InnerBitSize = N.getOperand(0).getValueSizeInBits();
857 if (!refineRxSBGMask(RxSBG, allOnes(InnerBitSize)))
858 return false;
859
860 RxSBG.Input = N.getOperand(0);
861 return true;
862 }
863 LLVM_FALLTHROUGH;
864
865 case ISD::SIGN_EXTEND: {
866 // Check that the extension bits are don't-care (i.e. are masked out
867 // by the final mask).
868 unsigned BitSize = N.getValueSizeInBits();
869 unsigned InnerBitSize = N.getOperand(0).getValueSizeInBits();
870 if (maskMatters(RxSBG, allOnes(BitSize) - allOnes(InnerBitSize))) {
871 // In the case where only the sign bit is active, increase Rotate with
872 // the extension width.
873 if (RxSBG.Mask == 1 && RxSBG.Rotate == 1)
874 RxSBG.Rotate += (BitSize - InnerBitSize);
875 else
876 return false;
877 }
878
879 RxSBG.Input = N.getOperand(0);
880 return true;
881 }
882
883 case ISD::SHL: {
884 auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
885 if (!CountNode)
886 return false;
887
888 uint64_t Count = CountNode->getZExtValue();
889 unsigned BitSize = N.getValueSizeInBits();
890 if (Count < 1 || Count >= BitSize)
891 return false;
892
893 if (RxSBG.Opcode == SystemZ::RNSBG) {
894 // Treat (shl X, count) as (rotl X, size-count) as long as the bottom
895 // count bits from RxSBG.Input are ignored.
896 if (maskMatters(RxSBG, allOnes(Count)))
897 return false;
898 } else {
899 // Treat (shl X, count) as (and (rotl X, count), ~0<<count).
900 if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count) << Count))
901 return false;
902 }
903
904 RxSBG.Rotate = (RxSBG.Rotate + Count) & 63;
905 RxSBG.Input = N.getOperand(0);
906 return true;
907 }
908
909 case ISD::SRL:
910 case ISD::SRA: {
911 auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
912 if (!CountNode)
913 return false;
914
915 uint64_t Count = CountNode->getZExtValue();
916 unsigned BitSize = N.getValueSizeInBits();
917 if (Count < 1 || Count >= BitSize)
918 return false;
919
920 if (RxSBG.Opcode == SystemZ::RNSBG || Opcode == ISD::SRA) {
921 // Treat (srl|sra X, count) as (rotl X, size-count) as long as the top
922 // count bits from RxSBG.Input are ignored.
923 if (maskMatters(RxSBG, allOnes(Count) << (BitSize - Count)))
924 return false;
925 } else {
926 // Treat (srl X, count), mask) as (and (rotl X, size-count), ~0>>count),
927 // which is similar to SLL above.
928 if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count)))
929 return false;
930 }
931
932 RxSBG.Rotate = (RxSBG.Rotate - Count) & 63;
933 RxSBG.Input = N.getOperand(0);
934 return true;
935 }
936 default:
937 return false;
938 }
939 }
940
getUNDEF(const SDLoc & DL,EVT VT) const941 SDValue SystemZDAGToDAGISel::getUNDEF(const SDLoc &DL, EVT VT) const {
942 SDNode *N = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT);
943 return SDValue(N, 0);
944 }
945
convertTo(const SDLoc & DL,EVT VT,SDValue N) const946 SDValue SystemZDAGToDAGISel::convertTo(const SDLoc &DL, EVT VT,
947 SDValue N) const {
948 if (N.getValueType() == MVT::i32 && VT == MVT::i64)
949 return CurDAG->getTargetInsertSubreg(SystemZ::subreg_l32,
950 DL, VT, getUNDEF(DL, MVT::i64), N);
951 if (N.getValueType() == MVT::i64 && VT == MVT::i32)
952 return CurDAG->getTargetExtractSubreg(SystemZ::subreg_l32, DL, VT, N);
953 assert(N.getValueType() == VT && "Unexpected value types");
954 return N;
955 }
956
tryRISBGZero(SDNode * N)957 bool SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
958 SDLoc DL(N);
959 EVT VT = N->getValueType(0);
960 if (!VT.isInteger() || VT.getSizeInBits() > 64)
961 return false;
962 RxSBGOperands RISBG(SystemZ::RISBG, SDValue(N, 0));
963 unsigned Count = 0;
964 while (expandRxSBG(RISBG))
965 // The widening or narrowing is expected to be free.
966 // Counting widening or narrowing as a saved operation will result in
967 // preferring an R*SBG over a simple shift/logical instruction.
968 if (RISBG.Input.getOpcode() != ISD::ANY_EXTEND &&
969 RISBG.Input.getOpcode() != ISD::TRUNCATE)
970 Count += 1;
971 if (Count == 0 || isa<ConstantSDNode>(RISBG.Input))
972 return false;
973
974 // Prefer to use normal shift instructions over RISBG, since they can handle
975 // all cases and are sometimes shorter.
976 if (Count == 1 && N->getOpcode() != ISD::AND)
977 return false;
978
979 // Prefer register extensions like LLC over RISBG. Also prefer to start
980 // out with normal ANDs if one instruction would be enough. We can convert
981 // these ANDs into an RISBG later if a three-address instruction is useful.
982 if (RISBG.Rotate == 0) {
983 bool PreferAnd = false;
984 // Prefer AND for any 32-bit and-immediate operation.
985 if (VT == MVT::i32)
986 PreferAnd = true;
987 // As well as for any 64-bit operation that can be implemented via LLC(R),
988 // LLH(R), LLGT(R), or one of the and-immediate instructions.
989 else if (RISBG.Mask == 0xff ||
990 RISBG.Mask == 0xffff ||
991 RISBG.Mask == 0x7fffffff ||
992 SystemZ::isImmLF(~RISBG.Mask) ||
993 SystemZ::isImmHF(~RISBG.Mask))
994 PreferAnd = true;
995 // And likewise for the LLZRGF instruction, which doesn't have a register
996 // to register version.
997 else if (auto *Load = dyn_cast<LoadSDNode>(RISBG.Input)) {
998 if (Load->getMemoryVT() == MVT::i32 &&
999 (Load->getExtensionType() == ISD::EXTLOAD ||
1000 Load->getExtensionType() == ISD::ZEXTLOAD) &&
1001 RISBG.Mask == 0xffffff00 &&
1002 Subtarget->hasLoadAndZeroRightmostByte())
1003 PreferAnd = true;
1004 }
1005 if (PreferAnd) {
1006 // Replace the current node with an AND. Note that the current node
1007 // might already be that same AND, in which case it is already CSE'd
1008 // with it, and we must not call ReplaceNode.
1009 SDValue In = convertTo(DL, VT, RISBG.Input);
1010 SDValue Mask = CurDAG->getConstant(RISBG.Mask, DL, VT);
1011 SDValue New = CurDAG->getNode(ISD::AND, DL, VT, In, Mask);
1012 if (N != New.getNode()) {
1013 insertDAGNode(CurDAG, N, Mask);
1014 insertDAGNode(CurDAG, N, New);
1015 ReplaceNode(N, New.getNode());
1016 N = New.getNode();
1017 }
1018 // Now, select the machine opcode to implement this operation.
1019 if (!N->isMachineOpcode())
1020 SelectCode(N);
1021 return true;
1022 }
1023 }
1024
1025 unsigned Opcode = SystemZ::RISBG;
1026 // Prefer RISBGN if available, since it does not clobber CC.
1027 if (Subtarget->hasMiscellaneousExtensions())
1028 Opcode = SystemZ::RISBGN;
1029 EVT OpcodeVT = MVT::i64;
1030 if (VT == MVT::i32 && Subtarget->hasHighWord() &&
1031 // We can only use the 32-bit instructions if all source bits are
1032 // in the low 32 bits without wrapping, both after rotation (because
1033 // of the smaller range for Start and End) and before rotation
1034 // (because the input value is truncated).
1035 RISBG.Start >= 32 && RISBG.End >= RISBG.Start &&
1036 ((RISBG.Start + RISBG.Rotate) & 63) >= 32 &&
1037 ((RISBG.End + RISBG.Rotate) & 63) >=
1038 ((RISBG.Start + RISBG.Rotate) & 63)) {
1039 Opcode = SystemZ::RISBMux;
1040 OpcodeVT = MVT::i32;
1041 RISBG.Start &= 31;
1042 RISBG.End &= 31;
1043 }
1044 SDValue Ops[5] = {
1045 getUNDEF(DL, OpcodeVT),
1046 convertTo(DL, OpcodeVT, RISBG.Input),
1047 CurDAG->getTargetConstant(RISBG.Start, DL, MVT::i32),
1048 CurDAG->getTargetConstant(RISBG.End | 128, DL, MVT::i32),
1049 CurDAG->getTargetConstant(RISBG.Rotate, DL, MVT::i32)
1050 };
1051 SDValue New = convertTo(
1052 DL, VT, SDValue(CurDAG->getMachineNode(Opcode, DL, OpcodeVT, Ops), 0));
1053 ReplaceNode(N, New.getNode());
1054 return true;
1055 }
1056
tryRxSBG(SDNode * N,unsigned Opcode)1057 bool SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
1058 SDLoc DL(N);
1059 EVT VT = N->getValueType(0);
1060 if (!VT.isInteger() || VT.getSizeInBits() > 64)
1061 return false;
1062 // Try treating each operand of N as the second operand of the RxSBG
1063 // and see which goes deepest.
1064 RxSBGOperands RxSBG[] = {
1065 RxSBGOperands(Opcode, N->getOperand(0)),
1066 RxSBGOperands(Opcode, N->getOperand(1))
1067 };
1068 unsigned Count[] = { 0, 0 };
1069 for (unsigned I = 0; I < 2; ++I)
1070 while (expandRxSBG(RxSBG[I]))
1071 // The widening or narrowing is expected to be free.
1072 // Counting widening or narrowing as a saved operation will result in
1073 // preferring an R*SBG over a simple shift/logical instruction.
1074 if (RxSBG[I].Input.getOpcode() != ISD::ANY_EXTEND &&
1075 RxSBG[I].Input.getOpcode() != ISD::TRUNCATE)
1076 Count[I] += 1;
1077
1078 // Do nothing if neither operand is suitable.
1079 if (Count[0] == 0 && Count[1] == 0)
1080 return false;
1081
1082 // Pick the deepest second operand.
1083 unsigned I = Count[0] > Count[1] ? 0 : 1;
1084 SDValue Op0 = N->getOperand(I ^ 1);
1085
1086 // Prefer IC for character insertions from memory.
1087 if (Opcode == SystemZ::ROSBG && (RxSBG[I].Mask & 0xff) == 0)
1088 if (auto *Load = dyn_cast<LoadSDNode>(Op0.getNode()))
1089 if (Load->getMemoryVT() == MVT::i8)
1090 return false;
1091
1092 // See whether we can avoid an AND in the first operand by converting
1093 // ROSBG to RISBG.
1094 if (Opcode == SystemZ::ROSBG && detectOrAndInsertion(Op0, RxSBG[I].Mask)) {
1095 Opcode = SystemZ::RISBG;
1096 // Prefer RISBGN if available, since it does not clobber CC.
1097 if (Subtarget->hasMiscellaneousExtensions())
1098 Opcode = SystemZ::RISBGN;
1099 }
1100
1101 SDValue Ops[5] = {
1102 convertTo(DL, MVT::i64, Op0),
1103 convertTo(DL, MVT::i64, RxSBG[I].Input),
1104 CurDAG->getTargetConstant(RxSBG[I].Start, DL, MVT::i32),
1105 CurDAG->getTargetConstant(RxSBG[I].End, DL, MVT::i32),
1106 CurDAG->getTargetConstant(RxSBG[I].Rotate, DL, MVT::i32)
1107 };
1108 SDValue New = convertTo(
1109 DL, VT, SDValue(CurDAG->getMachineNode(Opcode, DL, MVT::i64, Ops), 0));
1110 ReplaceNode(N, New.getNode());
1111 return true;
1112 }
1113
splitLargeImmediate(unsigned Opcode,SDNode * Node,SDValue Op0,uint64_t UpperVal,uint64_t LowerVal)1114 void SystemZDAGToDAGISel::splitLargeImmediate(unsigned Opcode, SDNode *Node,
1115 SDValue Op0, uint64_t UpperVal,
1116 uint64_t LowerVal) {
1117 EVT VT = Node->getValueType(0);
1118 SDLoc DL(Node);
1119 SDValue Upper = CurDAG->getConstant(UpperVal, DL, VT);
1120 if (Op0.getNode())
1121 Upper = CurDAG->getNode(Opcode, DL, VT, Op0, Upper);
1122
1123 {
1124 // When we haven't passed in Op0, Upper will be a constant. In order to
1125 // prevent folding back to the large immediate in `Or = getNode(...)` we run
1126 // SelectCode first and end up with an opaque machine node. This means that
1127 // we need to use a handle to keep track of Upper in case it gets CSE'd by
1128 // SelectCode.
1129 //
1130 // Note that in the case where Op0 is passed in we could just call
1131 // SelectCode(Upper) later, along with the SelectCode(Or), and avoid needing
1132 // the handle at all, but it's fine to do it here.
1133 //
1134 // TODO: This is a pretty hacky way to do this. Can we do something that
1135 // doesn't require a two paragraph explanation?
1136 HandleSDNode Handle(Upper);
1137 SelectCode(Upper.getNode());
1138 Upper = Handle.getValue();
1139 }
1140
1141 SDValue Lower = CurDAG->getConstant(LowerVal, DL, VT);
1142 SDValue Or = CurDAG->getNode(Opcode, DL, VT, Upper, Lower);
1143
1144 ReplaceNode(Node, Or.getNode());
1145
1146 SelectCode(Or.getNode());
1147 }
1148
loadVectorConstant(const SystemZVectorConstantInfo & VCI,SDNode * Node)1149 void SystemZDAGToDAGISel::loadVectorConstant(
1150 const SystemZVectorConstantInfo &VCI, SDNode *Node) {
1151 assert((VCI.Opcode == SystemZISD::BYTE_MASK ||
1152 VCI.Opcode == SystemZISD::REPLICATE ||
1153 VCI.Opcode == SystemZISD::ROTATE_MASK) &&
1154 "Bad opcode!");
1155 assert(VCI.VecVT.getSizeInBits() == 128 && "Expected a vector type");
1156 EVT VT = Node->getValueType(0);
1157 SDLoc DL(Node);
1158 SmallVector<SDValue, 2> Ops;
1159 for (unsigned OpVal : VCI.OpVals)
1160 Ops.push_back(CurDAG->getTargetConstant(OpVal, DL, MVT::i32));
1161 SDValue Op = CurDAG->getNode(VCI.Opcode, DL, VCI.VecVT, Ops);
1162
1163 if (VCI.VecVT == VT.getSimpleVT())
1164 ReplaceNode(Node, Op.getNode());
1165 else if (VT.getSizeInBits() == 128) {
1166 SDValue BitCast = CurDAG->getNode(ISD::BITCAST, DL, VT, Op);
1167 ReplaceNode(Node, BitCast.getNode());
1168 SelectCode(BitCast.getNode());
1169 } else { // float or double
1170 unsigned SubRegIdx =
1171 (VT.getSizeInBits() == 32 ? SystemZ::subreg_h32 : SystemZ::subreg_h64);
1172 ReplaceNode(
1173 Node, CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, Op).getNode());
1174 }
1175 SelectCode(Op.getNode());
1176 }
1177
tryGather(SDNode * N,unsigned Opcode)1178 bool SystemZDAGToDAGISel::tryGather(SDNode *N, unsigned Opcode) {
1179 SDValue ElemV = N->getOperand(2);
1180 auto *ElemN = dyn_cast<ConstantSDNode>(ElemV);
1181 if (!ElemN)
1182 return false;
1183
1184 unsigned Elem = ElemN->getZExtValue();
1185 EVT VT = N->getValueType(0);
1186 if (Elem >= VT.getVectorNumElements())
1187 return false;
1188
1189 auto *Load = dyn_cast<LoadSDNode>(N->getOperand(1));
1190 if (!Load || !Load->hasNUsesOfValue(1, 0))
1191 return false;
1192 if (Load->getMemoryVT().getSizeInBits() !=
1193 Load->getValueType(0).getSizeInBits())
1194 return false;
1195
1196 SDValue Base, Disp, Index;
1197 if (!selectBDVAddr12Only(Load->getBasePtr(), ElemV, Base, Disp, Index) ||
1198 Index.getValueType() != VT.changeVectorElementTypeToInteger())
1199 return false;
1200
1201 SDLoc DL(Load);
1202 SDValue Ops[] = {
1203 N->getOperand(0), Base, Disp, Index,
1204 CurDAG->getTargetConstant(Elem, DL, MVT::i32), Load->getChain()
1205 };
1206 SDNode *Res = CurDAG->getMachineNode(Opcode, DL, VT, MVT::Other, Ops);
1207 ReplaceUses(SDValue(Load, 1), SDValue(Res, 1));
1208 ReplaceNode(N, Res);
1209 return true;
1210 }
1211
tryScatter(StoreSDNode * Store,unsigned Opcode)1212 bool SystemZDAGToDAGISel::tryScatter(StoreSDNode *Store, unsigned Opcode) {
1213 SDValue Value = Store->getValue();
1214 if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1215 return false;
1216 if (Store->getMemoryVT().getSizeInBits() != Value.getValueSizeInBits())
1217 return false;
1218
1219 SDValue ElemV = Value.getOperand(1);
1220 auto *ElemN = dyn_cast<ConstantSDNode>(ElemV);
1221 if (!ElemN)
1222 return false;
1223
1224 SDValue Vec = Value.getOperand(0);
1225 EVT VT = Vec.getValueType();
1226 unsigned Elem = ElemN->getZExtValue();
1227 if (Elem >= VT.getVectorNumElements())
1228 return false;
1229
1230 SDValue Base, Disp, Index;
1231 if (!selectBDVAddr12Only(Store->getBasePtr(), ElemV, Base, Disp, Index) ||
1232 Index.getValueType() != VT.changeVectorElementTypeToInteger())
1233 return false;
1234
1235 SDLoc DL(Store);
1236 SDValue Ops[] = {
1237 Vec, Base, Disp, Index, CurDAG->getTargetConstant(Elem, DL, MVT::i32),
1238 Store->getChain()
1239 };
1240 ReplaceNode(Store, CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops));
1241 return true;
1242 }
1243
1244 // Check whether or not the chain ending in StoreNode is suitable for doing
1245 // the {load; op; store} to modify transformation.
isFusableLoadOpStorePattern(StoreSDNode * StoreNode,SDValue StoredVal,SelectionDAG * CurDAG,LoadSDNode * & LoadNode,SDValue & InputChain)1246 static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode,
1247 SDValue StoredVal, SelectionDAG *CurDAG,
1248 LoadSDNode *&LoadNode,
1249 SDValue &InputChain) {
1250 // Is the stored value result 0 of the operation?
1251 if (StoredVal.getResNo() != 0)
1252 return false;
1253
1254 // Are there other uses of the loaded value than the operation?
1255 if (!StoredVal.getNode()->hasNUsesOfValue(1, 0))
1256 return false;
1257
1258 // Is the store non-extending and non-indexed?
1259 if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
1260 return false;
1261
1262 SDValue Load = StoredVal->getOperand(0);
1263 // Is the stored value a non-extending and non-indexed load?
1264 if (!ISD::isNormalLoad(Load.getNode()))
1265 return false;
1266
1267 // Return LoadNode by reference.
1268 LoadNode = cast<LoadSDNode>(Load);
1269
1270 // Is store the only read of the loaded value?
1271 if (!Load.hasOneUse())
1272 return false;
1273
1274 // Is the address of the store the same as the load?
1275 if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
1276 LoadNode->getOffset() != StoreNode->getOffset())
1277 return false;
1278
1279 // Check if the chain is produced by the load or is a TokenFactor with
1280 // the load output chain as an operand. Return InputChain by reference.
1281 SDValue Chain = StoreNode->getChain();
1282
1283 bool ChainCheck = false;
1284 if (Chain == Load.getValue(1)) {
1285 ChainCheck = true;
1286 InputChain = LoadNode->getChain();
1287 } else if (Chain.getOpcode() == ISD::TokenFactor) {
1288 SmallVector<SDValue, 4> ChainOps;
1289 SmallVector<const SDNode *, 4> LoopWorklist;
1290 SmallPtrSet<const SDNode *, 16> Visited;
1291 const unsigned int Max = 1024;
1292 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
1293 SDValue Op = Chain.getOperand(i);
1294 if (Op == Load.getValue(1)) {
1295 ChainCheck = true;
1296 // Drop Load, but keep its chain. No cycle check necessary.
1297 ChainOps.push_back(Load.getOperand(0));
1298 continue;
1299 }
1300 LoopWorklist.push_back(Op.getNode());
1301 ChainOps.push_back(Op);
1302 }
1303
1304 if (ChainCheck) {
1305 // Add the other operand of StoredVal to worklist.
1306 for (SDValue Op : StoredVal->ops())
1307 if (Op.getNode() != LoadNode)
1308 LoopWorklist.push_back(Op.getNode());
1309
1310 // Check if Load is reachable from any of the nodes in the worklist.
1311 if (SDNode::hasPredecessorHelper(Load.getNode(), Visited, LoopWorklist, Max,
1312 true))
1313 return false;
1314
1315 // Make a new TokenFactor with all the other input chains except
1316 // for the load.
1317 InputChain = CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain),
1318 MVT::Other, ChainOps);
1319 }
1320 }
1321 if (!ChainCheck)
1322 return false;
1323
1324 return true;
1325 }
1326
1327 // Change a chain of {load; op; store} of the same value into a simple op
1328 // through memory of that value, if the uses of the modified value and its
1329 // address are suitable.
1330 //
1331 // The tablegen pattern memory operand pattern is currently not able to match
1332 // the case where the CC on the original operation are used.
1333 //
1334 // See the equivalent routine in X86ISelDAGToDAG for further comments.
tryFoldLoadStoreIntoMemOperand(SDNode * Node)1335 bool SystemZDAGToDAGISel::tryFoldLoadStoreIntoMemOperand(SDNode *Node) {
1336 StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
1337 SDValue StoredVal = StoreNode->getOperand(1);
1338 unsigned Opc = StoredVal->getOpcode();
1339 SDLoc DL(StoreNode);
1340
1341 // Before we try to select anything, make sure this is memory operand size
1342 // and opcode we can handle. Note that this must match the code below that
1343 // actually lowers the opcodes.
1344 EVT MemVT = StoreNode->getMemoryVT();
1345 unsigned NewOpc = 0;
1346 bool NegateOperand = false;
1347 switch (Opc) {
1348 default:
1349 return false;
1350 case SystemZISD::SSUBO:
1351 NegateOperand = true;
1352 LLVM_FALLTHROUGH;
1353 case SystemZISD::SADDO:
1354 if (MemVT == MVT::i32)
1355 NewOpc = SystemZ::ASI;
1356 else if (MemVT == MVT::i64)
1357 NewOpc = SystemZ::AGSI;
1358 else
1359 return false;
1360 break;
1361 case SystemZISD::USUBO:
1362 NegateOperand = true;
1363 LLVM_FALLTHROUGH;
1364 case SystemZISD::UADDO:
1365 if (MemVT == MVT::i32)
1366 NewOpc = SystemZ::ALSI;
1367 else if (MemVT == MVT::i64)
1368 NewOpc = SystemZ::ALGSI;
1369 else
1370 return false;
1371 break;
1372 }
1373
1374 LoadSDNode *LoadNode = nullptr;
1375 SDValue InputChain;
1376 if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadNode,
1377 InputChain))
1378 return false;
1379
1380 SDValue Operand = StoredVal.getOperand(1);
1381 auto *OperandC = dyn_cast<ConstantSDNode>(Operand);
1382 if (!OperandC)
1383 return false;
1384 auto OperandV = OperandC->getAPIntValue();
1385 if (NegateOperand)
1386 OperandV = -OperandV;
1387 if (OperandV.getMinSignedBits() > 8)
1388 return false;
1389 Operand = CurDAG->getTargetConstant(OperandV, DL, MemVT);
1390
1391 SDValue Base, Disp;
1392 if (!selectBDAddr20Only(StoreNode->getBasePtr(), Base, Disp))
1393 return false;
1394
1395 SDValue Ops[] = { Base, Disp, Operand, InputChain };
1396 MachineSDNode *Result =
1397 CurDAG->getMachineNode(NewOpc, DL, MVT::i32, MVT::Other, Ops);
1398 CurDAG->setNodeMemRefs(
1399 Result, {StoreNode->getMemOperand(), LoadNode->getMemOperand()});
1400
1401 ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
1402 ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
1403 CurDAG->RemoveDeadNode(Node);
1404 return true;
1405 }
1406
canUseBlockOperation(StoreSDNode * Store,LoadSDNode * Load) const1407 bool SystemZDAGToDAGISel::canUseBlockOperation(StoreSDNode *Store,
1408 LoadSDNode *Load) const {
1409 // Check that the two memory operands have the same size.
1410 if (Load->getMemoryVT() != Store->getMemoryVT())
1411 return false;
1412
1413 // Volatility stops an access from being decomposed.
1414 if (Load->isVolatile() || Store->isVolatile())
1415 return false;
1416
1417 // There's no chance of overlap if the load is invariant.
1418 if (Load->isInvariant() && Load->isDereferenceable())
1419 return true;
1420
1421 // Otherwise we need to check whether there's an alias.
1422 const Value *V1 = Load->getMemOperand()->getValue();
1423 const Value *V2 = Store->getMemOperand()->getValue();
1424 if (!V1 || !V2)
1425 return false;
1426
1427 // Reject equality.
1428 uint64_t Size = Load->getMemoryVT().getStoreSize();
1429 int64_t End1 = Load->getSrcValueOffset() + Size;
1430 int64_t End2 = Store->getSrcValueOffset() + Size;
1431 if (V1 == V2 && End1 == End2)
1432 return false;
1433
1434 return AA->isNoAlias(MemoryLocation(V1, End1, Load->getAAInfo()),
1435 MemoryLocation(V2, End2, Store->getAAInfo()));
1436 }
1437
storeLoadCanUseMVC(SDNode * N) const1438 bool SystemZDAGToDAGISel::storeLoadCanUseMVC(SDNode *N) const {
1439 auto *Store = cast<StoreSDNode>(N);
1440 auto *Load = cast<LoadSDNode>(Store->getValue());
1441
1442 // Prefer not to use MVC if either address can use ... RELATIVE LONG
1443 // instructions.
1444 uint64_t Size = Load->getMemoryVT().getStoreSize();
1445 if (Size > 1 && Size <= 8) {
1446 // Prefer LHRL, LRL and LGRL.
1447 if (SystemZISD::isPCREL(Load->getBasePtr().getOpcode()))
1448 return false;
1449 // Prefer STHRL, STRL and STGRL.
1450 if (SystemZISD::isPCREL(Store->getBasePtr().getOpcode()))
1451 return false;
1452 }
1453
1454 return canUseBlockOperation(Store, Load);
1455 }
1456
storeLoadCanUseBlockBinary(SDNode * N,unsigned I) const1457 bool SystemZDAGToDAGISel::storeLoadCanUseBlockBinary(SDNode *N,
1458 unsigned I) const {
1459 auto *StoreA = cast<StoreSDNode>(N);
1460 auto *LoadA = cast<LoadSDNode>(StoreA->getValue().getOperand(1 - I));
1461 auto *LoadB = cast<LoadSDNode>(StoreA->getValue().getOperand(I));
1462 return !LoadA->isVolatile() && LoadA->getMemoryVT() == LoadB->getMemoryVT() &&
1463 canUseBlockOperation(StoreA, LoadB);
1464 }
1465
storeLoadIsAligned(SDNode * N) const1466 bool SystemZDAGToDAGISel::storeLoadIsAligned(SDNode *N) const {
1467
1468 auto *MemAccess = cast<LSBaseSDNode>(N);
1469 TypeSize StoreSize = MemAccess->getMemoryVT().getStoreSize();
1470 SDValue BasePtr = MemAccess->getBasePtr();
1471 MachineMemOperand *MMO = MemAccess->getMemOperand();
1472 assert(MMO && "Expected a memory operand.");
1473
1474 // The memory access must have a proper alignment and no index register.
1475 if (MemAccess->getAlign().value() < StoreSize ||
1476 !MemAccess->getOffset().isUndef())
1477 return false;
1478
1479 // The MMO must not have an unaligned offset.
1480 if (MMO->getOffset() % StoreSize != 0)
1481 return false;
1482
1483 // An access to GOT or the Constant Pool is aligned.
1484 if (const PseudoSourceValue *PSV = MMO->getPseudoValue())
1485 if ((PSV->isGOT() || PSV->isConstantPool()))
1486 return true;
1487
1488 // Check the alignment of a Global Address.
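// Both the symbol's alignment and any constant offset must be multiples
// of the access size; e.g. a 4-byte access to GV+6 is rejected even if
// GV itself is word-aligned.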
1489 if (BasePtr.getNumOperands())
1490 if (GlobalAddressSDNode *GA =
1491 dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0))) {
1492 // The immediate offset must be aligned.
1493 if (GA->getOffset() % StoreSize != 0)
1494 return false;
1495
1496 // The alignment of the symbol itself must be at least the store size.
1497 const GlobalValue *GV = GA->getGlobal();
1498 const DataLayout &DL = GV->getParent()->getDataLayout();
1499 if (GV->getPointerAlignment(DL).value() < StoreSize)
1500 return false;
1501 }
1502
1503 return true;
1504 }
1505
1506 void SystemZDAGToDAGISel::Select(SDNode *Node) {
1507 // If we have a custom node, we already have selected!
1508 if (Node->isMachineOpcode()) {
1509 LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
1510 Node->setNodeId(-1);
1511 return;
1512 }
1513
1514 unsigned Opcode = Node->getOpcode();
1515 switch (Opcode) {
1516 case ISD::OR:
1517 if (Node->getOperand(1).getOpcode() != ISD::Constant)
1518 if (tryRxSBG(Node, SystemZ::ROSBG))
1519 return;
1520 goto or_xor;
1521
1522 case ISD::XOR:
1523 if (Node->getOperand(1).getOpcode() != ISD::Constant)
1524 if (tryRxSBG(Node, SystemZ::RXSBG))
1525 return;
1526 // Fall through.
1527 or_xor:
1528 // If this is a 64-bit operation in which both 32-bit halves are nonzero,
1529 // split the operation into two. If both operands here happen to be
1530 // constant, leave this to common code to optimize.
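// For example, 0x0000123400005678 is neither a low-word nor a high-word
// immediate, so the operation is rewritten to apply 0x0000123400000000
// and 0x00005678 separately.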
1531 if (Node->getValueType(0) == MVT::i64 &&
1532 Node->getOperand(0).getOpcode() != ISD::Constant)
1533 if (auto *Op1 = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
1534 uint64_t Val = Op1->getZExtValue();
1535 // Don't split the operation if we can match one of the combined
1536 // logical operations provided by miscellaneous-extensions-3.
1537 if (Subtarget->hasMiscellaneousExtensions3()) {
1538 unsigned ChildOpcode = Node->getOperand(0).getOpcode();
1539 // Check whether this expression matches NAND/NOR/NXOR.
1540 if (Val == (uint64_t)-1 && Opcode == ISD::XOR)
1541 if (ChildOpcode == ISD::AND || ChildOpcode == ISD::OR ||
1542 ChildOpcode == ISD::XOR)
1543 break;
1544 // Check whether this expression matches OR-with-complement
1545 // (or matches an alternate pattern for NXOR).
1546 if (ChildOpcode == ISD::XOR) {
1547 auto Op0 = Node->getOperand(0);
1548 if (auto *Op0Op1 = dyn_cast<ConstantSDNode>(Op0->getOperand(1)))
1549 if (Op0Op1->getZExtValue() == (uint64_t)-1)
1550 break;
1551 }
1552 }
1553 if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val)) {
1554 splitLargeImmediate(Opcode, Node, Node->getOperand(0),
1555 Val - uint32_t(Val), uint32_t(Val));
1556 return;
1557 }
1558 }
1559 break;
1560
1561 case ISD::AND:
1562 if (Node->getOperand(1).getOpcode() != ISD::Constant)
1563 if (tryRxSBG(Node, SystemZ::RNSBG))
1564 return;
1565 LLVM_FALLTHROUGH;
1566 case ISD::ROTL:
1567 case ISD::SHL:
1568 case ISD::SRL:
1569 case ISD::ZERO_EXTEND:
1570 if (tryRISBGZero(Node))
1571 return;
1572 break;
1573
1574 case ISD::Constant:
1575 // If this is a 64-bit constant that is out of the range of LLILF,
1576 // LLIHF and LGFI, split it into two 32-bit pieces.
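// (LLILF and LLIHF materialize a value confined to the low or high word
// respectively, and LGFI covers sign-extended 32-bit values; anything
// outside those ranges takes two instructions.)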
1577 if (Node->getValueType(0) == MVT::i64) {
1578 uint64_t Val = cast<ConstantSDNode>(Node)->getZExtValue();
1579 if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val) && !isInt<32>(Val)) {
1580 splitLargeImmediate(ISD::OR, Node, SDValue(), Val - uint32_t(Val),
1581 uint32_t(Val));
1582 return;
1583 }
1584 }
1585 break;
1586
1587 case SystemZISD::SELECT_CCMASK: {
1588 SDValue Op0 = Node->getOperand(0);
1589 SDValue Op1 = Node->getOperand(1);
1590 // Prefer to put any load first, so that it can be matched as a
1591 // conditional load. Likewise for constants in range for LOCHI.
1592 if ((Op1.getOpcode() == ISD::LOAD && Op0.getOpcode() != ISD::LOAD) ||
1593 (Subtarget->hasLoadStoreOnCond2() &&
1594 Node->getValueType(0).isInteger() &&
1595 Op1.getOpcode() == ISD::Constant &&
1596 isInt<16>(cast<ConstantSDNode>(Op1)->getSExtValue()) &&
1597 !(Op0.getOpcode() == ISD::Constant &&
1598 isInt<16>(cast<ConstantSDNode>(Op0)->getSExtValue())))) {
1599 SDValue CCValid = Node->getOperand(2);
1600 SDValue CCMask = Node->getOperand(3);
1601 uint64_t ConstCCValid =
1602 cast<ConstantSDNode>(CCValid.getNode())->getZExtValue();
1603 uint64_t ConstCCMask =
1604 cast<ConstantSDNode>(CCMask.getNode())->getZExtValue();
1605 // Invert the condition.
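// Within CCValid, the complementary mask is CCValid ^ CCMask; e.g. with
// CCValid = 14 (CC 0-2 possible) and CCMask = 8 (CC 0), the swapped
// operands use mask 6 (CC 1 or 2).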
1606 CCMask = CurDAG->getTargetConstant(ConstCCValid ^ ConstCCMask,
1607 SDLoc(Node), CCMask.getValueType());
1608 SDValue Op4 = Node->getOperand(4);
1609 SDNode *UpdatedNode =
1610 CurDAG->UpdateNodeOperands(Node, Op1, Op0, CCValid, CCMask, Op4);
1611 if (UpdatedNode != Node) {
1612 // In case this node already exists then replace Node with it.
1613 ReplaceNode(Node, UpdatedNode);
1614 Node = UpdatedNode;
1615 }
1616 }
1617 break;
1618 }
1619
1620 case ISD::INSERT_VECTOR_ELT: {
1621 EVT VT = Node->getValueType(0);
1622 unsigned ElemBitSize = VT.getScalarSizeInBits();
1623 if (ElemBitSize == 32) {
1624 if (tryGather(Node, SystemZ::VGEF))
1625 return;
1626 } else if (ElemBitSize == 64) {
1627 if (tryGather(Node, SystemZ::VGEG))
1628 return;
1629 }
1630 break;
1631 }
1632
1633 case ISD::BUILD_VECTOR: {
1634 auto *BVN = cast<BuildVectorSDNode>(Node);
1635 SystemZVectorConstantInfo VCI(BVN);
1636 if (VCI.isVectorConstantLegal(*Subtarget)) {
1637 loadVectorConstant(VCI, Node);
1638 return;
1639 }
1640 break;
1641 }
1642
1643 case ISD::ConstantFP: {
1644 APFloat Imm = cast<ConstantFPSDNode>(Node)->getValueAPF();
1645 if (Imm.isZero() || Imm.isNegZero())
1646 break;
1647 SystemZVectorConstantInfo VCI(Imm);
1648 bool Success = VCI.isVectorConstantLegal(*Subtarget); (void)Success;
1649 assert(Success && "Expected legal FP immediate");
1650 loadVectorConstant(VCI, Node);
1651 return;
1652 }
1653
1654 case ISD::STORE: {
1655 if (tryFoldLoadStoreIntoMemOperand(Node))
1656 return;
1657 auto *Store = cast<StoreSDNode>(Node);
1658 unsigned ElemBitSize = Store->getValue().getValueSizeInBits();
1659 if (ElemBitSize == 32) {
1660 if (tryScatter(Store, SystemZ::VSCEF))
1661 return;
1662 } else if (ElemBitSize == 64) {
1663 if (tryScatter(Store, SystemZ::VSCEG))
1664 return;
1665 }
1666 break;
1667 }
1668 }
1669
1670 SelectCode(Node);
1671 }
1672
1673 bool SystemZDAGToDAGISel::
1674 SelectInlineAsmMemoryOperand(const SDValue &Op,
1675 unsigned ConstraintID,
1676 std::vector<SDValue> &OutOps) {
1677 SystemZAddressingMode::AddrForm Form;
1678 SystemZAddressingMode::DispRange DispRange;
1679 SDValue Base, Disp, Index;
1680
1681 switch(ConstraintID) {
1682 default:
1683 llvm_unreachable("Unexpected asm memory constraint");
1684 case InlineAsm::Constraint_i:
1685 case InlineAsm::Constraint_Q:
1686 case InlineAsm::Constraint_ZQ:
1687 // Accept an address with a short displacement, but no index.
1688 Form = SystemZAddressingMode::FormBD;
1689 DispRange = SystemZAddressingMode::Disp12Only;
1690 break;
1691 case InlineAsm::Constraint_R:
1692 case InlineAsm::Constraint_ZR:
1693 // Accept an address with a short displacement and an index.
1694 Form = SystemZAddressingMode::FormBDXNormal;
1695 DispRange = SystemZAddressingMode::Disp12Only;
1696 break;
1697 case InlineAsm::Constraint_S:
1698 case InlineAsm::Constraint_ZS:
1699 // Accept an address with a long displacement, but no index.
1700 Form = SystemZAddressingMode::FormBD;
1701 DispRange = SystemZAddressingMode::Disp20Only;
1702 break;
1703 case InlineAsm::Constraint_T:
1704 case InlineAsm::Constraint_m:
1705 case InlineAsm::Constraint_o:
1706 case InlineAsm::Constraint_p:
1707 case InlineAsm::Constraint_ZT:
1708 // Accept an address with a long displacement and an index.
1709 // m works the same as T, as this is the most general case.
1710 // We don't really have any special handling of "offsettable"
1711 // memory addresses, so just treat o the same as m.
1712 Form = SystemZAddressingMode::FormBDXNormal;
1713 DispRange = SystemZAddressingMode::Disp20Only;
1714 break;
1715 }
1716
1717 if (selectBDXAddr(Form, DispRange, Op, Base, Disp, Index)) {
1718 const TargetRegisterClass *TRC =
1719 Subtarget->getRegisterInfo()->getPointerRegClass(*MF);
1720 SDLoc DL(Base);
1721 SDValue RC = CurDAG->getTargetConstant(TRC->getID(), DL, MVT::i32);
1722
1723 // Make sure that the base address doesn't go into %r0.
1724 // If it's a TargetFrameIndex or a fixed register, we shouldn't do anything.
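// (A base or index field of zero means "no register" in SystemZ
// addressing, so %r0 can never carry part of an address.)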
1725 if (Base.getOpcode() != ISD::TargetFrameIndex &&
1726 Base.getOpcode() != ISD::Register) {
1727 Base =
1728 SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1729 DL, Base.getValueType(),
1730 Base, RC), 0);
1731 }
1732
1733 // Make sure that the index register isn't assigned to %r0 either.
1734 if (Index.getOpcode() != ISD::Register) {
1735 Index =
1736 SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1737 DL, Index.getValueType(),
1738 Index, RC), 0);
1739 }
1740
1741 OutOps.push_back(Base);
1742 OutOps.push_back(Disp);
1743 OutOps.push_back(Index);
1744 return false;
1745 }
1746
1747 return true;
1748 }
1749
1750 // IsProfitableToFold - Returns true if it is profitable to fold the specific
1751 // operand node N of U during instruction selection that starts at Root.
1752 bool
1753 SystemZDAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U,
1754 SDNode *Root) const {
1755 // We want to avoid folding a LOAD into an ICMP node if as a result
1756 // we would be forced to spill the condition code into a GPR.
1757 if (N.getOpcode() == ISD::LOAD && U->getOpcode() == SystemZISD::ICMP) {
1758 if (!N.hasOneUse() || !U->hasOneUse())
1759 return false;
1760
1761 // The user of the CC value will usually be a CopyToReg into the
1762 // physical CC register, which in turn is glued and chained to the
1763 // actual instruction that uses the CC value. Bail out if we see
1764 // anything other than that.
1765 SDNode *CCUser = *U->use_begin();
1766 SDNode *CCRegUser = nullptr;
1767 if (CCUser->getOpcode() == ISD::CopyToReg &&
1768 cast<RegisterSDNode>(CCUser->getOperand(1))->getReg() == SystemZ::CC) {
1769 for (auto *U : CCUser->uses()) {
1770 if (CCRegUser == nullptr)
1771 CCRegUser = U;
1772 else if (CCRegUser != U)
1773 return false;
1774 }
1775 }
1776 if (CCRegUser == nullptr)
1777 return false;
1778
1779 // If the actual instruction is a branch, the only thing that remains to be
1780 // checked is whether the CCUser chain is a predecessor of the load.
1781 if (CCRegUser->isMachineOpcode() &&
1782 CCRegUser->getMachineOpcode() == SystemZ::BRC)
1783 return !N->isPredecessorOf(CCUser->getOperand(0).getNode());
1784
1785 // Otherwise, the instruction may have multiple operands, and we need to
1786 // verify that none of them are a predecessor of the load. This is exactly
1787 // the same check that would be done by common code if the CC setter were
1788 // glued to the CC user, so simply invoke that check here.
1789 if (!IsLegalToFold(N, U, CCRegUser, OptLevel, false))
1790 return false;
1791 }
1792
1793 return true;
1794 }
1795
1796 namespace {
1797 // Represents a sequence for extracting a 0/1 value from an IPM result:
1798 // (((X ^ XORValue) + AddValue) >> Bit)
1799 struct IPMConversion {
1800   IPMConversion(unsigned xorValue, int64_t addValue, unsigned bit)
1801 : XORValue(xorValue), AddValue(addValue), Bit(bit) {}
1802
1803 int64_t XORValue;
1804 int64_t AddValue;
1805 unsigned Bit;
1806 };
1807 } // end anonymous namespace
1808
1809 // Return a sequence for getting a 1 from an IPM result when CC has a
1810 // value in CCMask and a 0 when CC has a value in CCValid & ~CCMask.
1811 // The handling of CC values outside CCValid doesn't matter.
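// For example, with IPM_CC == 28 the IPM result has CC in bits 29-28 and
// zeros above, so a test for CC == 0 uses (X + (-(1 << 28))) >> 31: the
// addition only leaves bit 31 set when both CC bits are zero.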
1812 static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
1813 // Deal with cases where the result can be taken directly from a bit
1814 // of the IPM result.
1815 if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3)))
1816 return IPMConversion(0, 0, SystemZ::IPM_CC);
1817 if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3)))
1818 return IPMConversion(0, 0, SystemZ::IPM_CC + 1);
1819
1820 // Deal with cases where we can add a value to force the sign bit
1821 // to contain the right value. Putting the bit in 31 means we can
1822 // use SRL rather than RISBG(L), and also makes it easier to get a
1823 // 0/-1 value, so it has priority over the other tests below.
1824 //
1825 // These sequences rely on the fact that the upper two bits of the
1826 // IPM result are zero.
1827 uint64_t TopBit = uint64_t(1) << 31;
1828 if (CCMask == (CCValid & SystemZ::CCMASK_0))
1829 return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31);
1830 if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1)))
1831 return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31);
1832 if (CCMask == (CCValid & (SystemZ::CCMASK_0
1833 | SystemZ::CCMASK_1
1834 | SystemZ::CCMASK_2)))
1835 return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31);
1836 if (CCMask == (CCValid & SystemZ::CCMASK_3))
1837 return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31);
1838 if (CCMask == (CCValid & (SystemZ::CCMASK_1
1839 | SystemZ::CCMASK_2
1840 | SystemZ::CCMASK_3)))
1841 return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31);
1842
1843 // Next try inverting the value and testing a bit. 0/1 could be
1844 // handled this way too, but we dealt with that case above.
1845 if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2)))
1846 return IPMConversion(-1, 0, SystemZ::IPM_CC);
1847
1848 // Handle cases where adding a value forces a non-sign bit to contain
1849 // the right value.
1850 if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2)))
1851 return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1);
1852 if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
1853 return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);
1854
1855 // The remaining cases are 1, 2, 0/1/3 and 0/2/3. All of these
1856 // can be done by inverting the low CC bit and applying one of the
1857 // sign-based extractions above.
1858 if (CCMask == (CCValid & SystemZ::CCMASK_1))
1859 return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31);
1860 if (CCMask == (CCValid & SystemZ::CCMASK_2))
1861 return IPMConversion(1 << SystemZ::IPM_CC,
1862 TopBit - (3 << SystemZ::IPM_CC), 31);
1863 if (CCMask == (CCValid & (SystemZ::CCMASK_0
1864 | SystemZ::CCMASK_1
1865 | SystemZ::CCMASK_3)))
1866 return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31);
1867 if (CCMask == (CCValid & (SystemZ::CCMASK_0
1868 | SystemZ::CCMASK_2
1869 | SystemZ::CCMASK_3)))
1870 return IPMConversion(1 << SystemZ::IPM_CC,
1871 TopBit - (1 << SystemZ::IPM_CC), 31);
1872
1873 llvm_unreachable("Unexpected CC combination");
1874 }
1875
1876 SDValue SystemZDAGToDAGISel::expandSelectBoolean(SDNode *Node) {
1877 auto *TrueOp = dyn_cast<ConstantSDNode>(Node->getOperand(0));
1878 auto *FalseOp = dyn_cast<ConstantSDNode>(Node->getOperand(1));
1879 if (!TrueOp || !FalseOp)
1880 return SDValue();
1881 if (FalseOp->getZExtValue() != 0)
1882 return SDValue();
1883 if (TrueOp->getSExtValue() != 1 && TrueOp->getSExtValue() != -1)
1884 return SDValue();
1885
1886 auto *CCValidOp = dyn_cast<ConstantSDNode>(Node->getOperand(2));
1887 auto *CCMaskOp = dyn_cast<ConstantSDNode>(Node->getOperand(3));
1888 if (!CCValidOp || !CCMaskOp)
1889 return SDValue();
1890 int CCValid = CCValidOp->getZExtValue();
1891 int CCMask = CCMaskOp->getZExtValue();
1892
1893 SDLoc DL(Node);
1894 SDValue CCReg = Node->getOperand(4);
1895 IPMConversion IPM = getIPMConversion(CCValid, CCMask);
1896 SDValue Result = CurDAG->getNode(SystemZISD::IPM, DL, MVT::i32, CCReg);
1897
1898 if (IPM.XORValue)
1899 Result = CurDAG->getNode(ISD::XOR, DL, MVT::i32, Result,
1900 CurDAG->getConstant(IPM.XORValue, DL, MVT::i32));
1901
1902 if (IPM.AddValue)
1903 Result = CurDAG->getNode(ISD::ADD, DL, MVT::i32, Result,
1904 CurDAG->getConstant(IPM.AddValue, DL, MVT::i32));
1905
1906 EVT VT = Node->getValueType(0);
1907 if (VT == MVT::i32 && IPM.Bit == 31) {
1908 unsigned ShiftOp = TrueOp->getSExtValue() == 1 ? ISD::SRL : ISD::SRA;
1909 Result = CurDAG->getNode(ShiftOp, DL, MVT::i32, Result,
1910 CurDAG->getConstant(IPM.Bit, DL, MVT::i32));
1911 } else {
1912 if (VT != MVT::i32)
1913 Result = CurDAG->getNode(ISD::ANY_EXTEND, DL, VT, Result);
1914
1915 if (TrueOp->getSExtValue() == 1) {
1916 // The SHR/AND sequence should get optimized to an RISBG.
1917 Result = CurDAG->getNode(ISD::SRL, DL, VT, Result,
1918 CurDAG->getConstant(IPM.Bit, DL, MVT::i32));
1919 Result = CurDAG->getNode(ISD::AND, DL, VT, Result,
1920 CurDAG->getConstant(1, DL, VT));
1921 } else {
1922 // Sign-extend from IPM.Bit using a pair of shifts.
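// E.g. for a 64-bit result with IPM.Bit == 28, this is a shift left by
// 35 followed by an arithmetic shift right by 63, leaving 0 or -1.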
1923 int ShlAmt = VT.getSizeInBits() - 1 - IPM.Bit;
1924 int SraAmt = VT.getSizeInBits() - 1;
1925 Result = CurDAG->getNode(ISD::SHL, DL, VT, Result,
1926 CurDAG->getConstant(ShlAmt, DL, MVT::i32));
1927 Result = CurDAG->getNode(ISD::SRA, DL, VT, Result,
1928 CurDAG->getConstant(SraAmt, DL, MVT::i32));
1929 }
1930 }
1931
1932 return Result;
1933 }
1934
1935 void SystemZDAGToDAGISel::PreprocessISelDAG() {
1936 // If we have conditional immediate loads, we always prefer
1937 // using those over an IPM sequence.
1938 if (Subtarget->hasLoadStoreOnCond2())
1939 return;
1940
1941 bool MadeChange = false;
1942
1943 for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
1944 E = CurDAG->allnodes_end();
1945 I != E;) {
1946 SDNode *N = &*I++;
1947 if (N->use_empty())
1948 continue;
1949
1950 SDValue Res;
1951 switch (N->getOpcode()) {
1952 default: break;
1953 case SystemZISD::SELECT_CCMASK:
1954 Res = expandSelectBoolean(N);
1955 break;
1956 }
1957
1958 if (Res) {
1959 LLVM_DEBUG(dbgs() << "SystemZ DAG preprocessing replacing:\nOld: ");
1960 LLVM_DEBUG(N->dump(CurDAG));
1961 LLVM_DEBUG(dbgs() << "\nNew: ");
1962 LLVM_DEBUG(Res.getNode()->dump(CurDAG));
1963 LLVM_DEBUG(dbgs() << "\n");
1964
1965 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
1966 MadeChange = true;
1967 }
1968 }
1969
1970 if (MadeChange)
1971 CurDAG->RemoveDeadNodes();
1972 }
1973