1 //===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
10 #include "AMDKernelCodeT.h"
11 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
12 #include "MCTargetDesc/AMDGPUTargetStreamer.h"
13 #include "SIDefines.h"
14 #include "Utils/AMDGPUBaseInfo.h"
15 #include "Utils/AMDKernelCodeTUtils.h"
16 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SmallBitVector.h"
19 #include "llvm/ADT/SmallString.h"
20 #include "llvm/ADT/StringSwitch.h"
21 #include "llvm/ADT/Twine.h"
22 #include "llvm/MC/MCContext.h"
23 #include "llvm/MC/MCExpr.h"
24 #include "llvm/MC/MCInst.h"
25 #include "llvm/MC/MCInstrInfo.h"
26 #include "llvm/MC/MCParser/MCAsmLexer.h"
27 #include "llvm/MC/MCParser/MCAsmParser.h"
28 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
29 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
30 #include "llvm/MC/MCRegisterInfo.h"
31 #include "llvm/MC/MCStreamer.h"
32 #include "llvm/MC/MCSubtargetInfo.h"
33 #include "llvm/MC/MCSymbolELF.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/ELF.h"
36 #include "llvm/Support/SourceMgr.h"
37 #include "llvm/Support/TargetRegistry.h"
38 #include "llvm/Support/raw_ostream.h"
39 
40 // FIXME ODR: Move this to some common place for AsmParser and InstPrinter
namespace llvm {
namespace AMDGPU {
namespace SendMsg {

// Symbolic names for s_sendmsg message IDs, indexed by message ID.
// This must be in sync with llvm::AMDGPU::SendMsg::Id enum members.
// nullptr entries mark IDs that have no symbolic name (reserved/invalid).
static
const char* const IdSymbolic[] = {
  nullptr,
  "MSG_INTERRUPT",
  "MSG_GS",
  "MSG_GS_DONE",
  nullptr,
  nullptr,
  nullptr,
  nullptr,
  nullptr,
  nullptr,
  nullptr,
  nullptr,
  nullptr,
  nullptr,
  nullptr,
  "MSG_SYSMSG"
};

// Symbolic names for the operand of MSG_SYSMSG and of MSG_GS/MSG_GS_DONE
// respectively, indexed by operand value.
// These two must be in sync with llvm::AMDGPU::SendMsg::Op enum members.
static
const char* const OpSysSymbolic[] = {
  nullptr,
  "SYSMSG_OP_ECC_ERR_INTERRUPT",
  "SYSMSG_OP_REG_RD",
  "SYSMSG_OP_HOST_TRAP_ACK",
  "SYSMSG_OP_TTRACE_PC"
};

static
const char* const OpGsSymbolic[] = {
  "GS_OP_NOP",
  "GS_OP_CUT",
  "GS_OP_EMIT",
  "GS_OP_EMIT_CUT"
};

} // namespace SendMsg
} // namespace AMDGPU
} // namespace llvm
87 
88 using namespace llvm;
89 
90 namespace {
91 
92 struct OptionalOperand;
93 
94 enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
95 
96 class AMDGPUOperand : public MCParsedAsmOperand {
97   enum KindTy {
98     Token,
99     Immediate,
100     Register,
101     Expression
102   } Kind;
103 
104   SMLoc StartLoc, EndLoc;
105 
106 public:
107   AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}
108 
109   MCContext *Ctx;
110 
111   typedef std::unique_ptr<AMDGPUOperand> Ptr;
112 
113   enum ImmTy {
114     ImmTyNone,
115     ImmTyGDS,
116     ImmTyOffen,
117     ImmTyIdxen,
118     ImmTyAddr64,
119     ImmTyOffset,
120     ImmTyOffset0,
121     ImmTyOffset1,
122     ImmTyGLC,
123     ImmTySLC,
124     ImmTyTFE,
125     ImmTyClampSI,
126     ImmTyOModSI,
127     ImmTyDppCtrl,
128     ImmTyDppRowMask,
129     ImmTyDppBankMask,
130     ImmTyDppBoundCtrl,
131     ImmTySdwaSel,
132     ImmTySdwaDstUnused,
133     ImmTyDMask,
134     ImmTyUNorm,
135     ImmTyDA,
136     ImmTyR128,
137     ImmTyLWE,
138     ImmTyHwreg,
139     ImmTySendMsg,
140   };
141 
142   struct TokOp {
143     const char *Data;
144     unsigned Length;
145   };
146 
147   struct ImmOp {
148     bool IsFPImm;
149     ImmTy Type;
150     int64_t Val;
151     int Modifiers;
152   };
153 
154   struct RegOp {
155     unsigned RegNo;
156     int Modifiers;
157     const MCRegisterInfo *TRI;
158     const MCSubtargetInfo *STI;
159     bool IsForcedVOP3;
160   };
161 
162   union {
163     TokOp Tok;
164     ImmOp Imm;
165     RegOp Reg;
166     const MCExpr *Expr;
167   };
168 
169   void addImmOperands(MCInst &Inst, unsigned N) const {
170     Inst.addOperand(MCOperand::createImm(getImm()));
171   }
172 
173   StringRef getToken() const {
174     return StringRef(Tok.Data, Tok.Length);
175   }
176 
177   void addRegOperands(MCInst &Inst, unsigned N) const {
178     Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
179   }
180 
181   void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
182     if (isRegKind())
183       addRegOperands(Inst, N);
184     else
185       addImmOperands(Inst, N);
186   }
187 
188   void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
189     if (isRegKind()) {
190       Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
191       addRegOperands(Inst, N);
192     } else {
193       Inst.addOperand(MCOperand::createImm(Imm.Modifiers));
194       addImmOperands(Inst, N);
195     }
196   }
197 
198   void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
199     if (isImm())
200       addImmOperands(Inst, N);
201     else {
202       assert(isExpr());
203       Inst.addOperand(MCOperand::createExpr(Expr));
204     }
205   }
206 
207   bool isToken() const override {
208     return Kind == Token;
209   }
210 
211   bool isImm() const override {
212     return Kind == Immediate;
213   }
214 
215   bool isInlinableImm() const {
216     if (!isImm() || Imm.Type != AMDGPUOperand::ImmTyNone /* Only plain
217       immediates are inlinable (e.g. "clamp" attribute is not) */ )
218       return false;
219     // TODO: We should avoid using host float here. It would be better to
220     // check the float bit values which is what a few other places do.
221     // We've had bot failures before due to weird NaN support on mips hosts.
222     const float F = BitsToFloat(Imm.Val);
223     // TODO: Add 1/(2*pi) for VI
224     return (Imm.Val <= 64 && Imm.Val >= -16) ||
225            (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
226            F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0);
227   }
228 
229   int64_t getImm() const {
230     return Imm.Val;
231   }
232 
233   enum ImmTy getImmTy() const {
234     assert(isImm());
235     return Imm.Type;
236   }
237 
238   bool isRegKind() const {
239     return Kind == Register;
240   }
241 
242   bool isReg() const override {
243     return Kind == Register && Reg.Modifiers == 0;
244   }
245 
246   bool isRegOrImmWithInputMods() const {
247     return Kind == Register || isInlinableImm();
248   }
249 
250   bool isImmTy(ImmTy ImmT) const {
251     return isImm() && Imm.Type == ImmT;
252   }
253 
254   bool isClampSI() const {
255     return isImmTy(ImmTyClampSI);
256   }
257 
258   bool isOModSI() const {
259     return isImmTy(ImmTyOModSI);
260   }
261 
262   bool isImmModifier() const {
263     return Kind == Immediate && Imm.Type != ImmTyNone;
264   }
265 
266   bool isDMask() const {
267     return isImmTy(ImmTyDMask);
268   }
269 
270   bool isUNorm() const { return isImmTy(ImmTyUNorm); }
271   bool isDA() const { return isImmTy(ImmTyDA); }
272   bool isR128() const { return isImmTy(ImmTyUNorm); }
273   bool isLWE() const { return isImmTy(ImmTyLWE); }
274 
275   bool isMod() const {
276     return isClampSI() || isOModSI();
277   }
278 
279   bool isOffen() const { return isImmTy(ImmTyOffen); }
280   bool isIdxen() const { return isImmTy(ImmTyIdxen); }
281   bool isAddr64() const { return isImmTy(ImmTyAddr64); }
282   bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
283   bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
284   bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
285   bool isGDS() const { return isImmTy(ImmTyGDS); }
286   bool isGLC() const { return isImmTy(ImmTyGLC); }
287   bool isSLC() const { return isImmTy(ImmTySLC); }
288   bool isTFE() const { return isImmTy(ImmTyTFE); }
289 
290   bool isBankMask() const {
291     return isImmTy(ImmTyDppBankMask);
292   }
293 
294   bool isRowMask() const {
295     return isImmTy(ImmTyDppRowMask);
296   }
297 
298   bool isBoundCtrl() const {
299     return isImmTy(ImmTyDppBoundCtrl);
300   }
301 
302   bool isSDWASel() const {
303     return isImmTy(ImmTySdwaSel);
304   }
305 
306   bool isSDWADstUnused() const {
307     return isImmTy(ImmTySdwaDstUnused);
308   }
309 
310   void setModifiers(unsigned Mods) {
311     assert(isReg() || (isImm() && Imm.Modifiers == 0));
312     if (isReg())
313       Reg.Modifiers = Mods;
314     else
315       Imm.Modifiers = Mods;
316   }
317 
318   bool hasModifiers() const {
319     assert(isRegKind() || isImm());
320     return isRegKind() ? Reg.Modifiers != 0 : Imm.Modifiers != 0;
321   }
322 
323   unsigned getReg() const override {
324     return Reg.RegNo;
325   }
326 
327   bool isRegOrImm() const {
328     return isReg() || isImm();
329   }
330 
331   bool isRegClass(unsigned RCID) const {
332     return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
333   }
334 
335   bool isSCSrc32() const {
336     return isInlinableImm() || isRegClass(AMDGPU::SReg_32RegClassID);
337   }
338 
339   bool isSCSrc64() const {
340     return isInlinableImm() || isRegClass(AMDGPU::SReg_64RegClassID);
341   }
342 
343   bool isSSrc32() const {
344     return isImm() || isSCSrc32();
345   }
346 
347   bool isSSrc64() const {
348     // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
349     // See isVSrc64().
350     return isImm() || isSCSrc64();
351   }
352 
353   bool isVCSrc32() const {
354     return isInlinableImm() || isRegClass(AMDGPU::VS_32RegClassID);
355   }
356 
357   bool isVCSrc64() const {
358     return isInlinableImm() || isRegClass(AMDGPU::VS_64RegClassID);
359   }
360 
361   bool isVSrc32() const {
362     return isImm() || isVCSrc32();
363   }
364 
365   bool isVSrc64() const {
366     // TODO: Check if the 64-bit value (coming from assembly source) can be
367     // narrowed to 32 bits (in the instruction stream). That require knowledge
368     // of instruction type (unsigned/signed, floating or "untyped"/B64),
369     // see [AMD GCN3 ISA 6.3.1].
370     // TODO: How 64-bit values are formed from 32-bit literals in _B64 insns?
371     return isImm() || isVCSrc64();
372   }
373 
374   bool isMem() const override {
375     return false;
376   }
377 
378   bool isExpr() const {
379     return Kind == Expression;
380   }
381 
382   bool isSoppBrTarget() const {
383     return isExpr() || isImm();
384   }
385 
386   SMLoc getStartLoc() const override {
387     return StartLoc;
388   }
389 
390   SMLoc getEndLoc() const override {
391     return EndLoc;
392   }
393 
394   void printImmTy(raw_ostream& OS, ImmTy Type) const {
395     switch (Type) {
396     case ImmTyNone: OS << "None"; break;
397     case ImmTyGDS: OS << "GDS"; break;
398     case ImmTyOffen: OS << "Offen"; break;
399     case ImmTyIdxen: OS << "Idxen"; break;
400     case ImmTyAddr64: OS << "Addr64"; break;
401     case ImmTyOffset: OS << "Offset"; break;
402     case ImmTyOffset0: OS << "Offset0"; break;
403     case ImmTyOffset1: OS << "Offset1"; break;
404     case ImmTyGLC: OS << "GLC"; break;
405     case ImmTySLC: OS << "SLC"; break;
406     case ImmTyTFE: OS << "TFE"; break;
407     case ImmTyClampSI: OS << "ClampSI"; break;
408     case ImmTyOModSI: OS << "OModSI"; break;
409     case ImmTyDppCtrl: OS << "DppCtrl"; break;
410     case ImmTyDppRowMask: OS << "DppRowMask"; break;
411     case ImmTyDppBankMask: OS << "DppBankMask"; break;
412     case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
413     case ImmTySdwaSel: OS << "SdwaSel"; break;
414     case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
415     case ImmTyDMask: OS << "DMask"; break;
416     case ImmTyUNorm: OS << "UNorm"; break;
417     case ImmTyDA: OS << "DA"; break;
418     case ImmTyR128: OS << "R128"; break;
419     case ImmTyLWE: OS << "LWE"; break;
420     case ImmTyHwreg: OS << "Hwreg"; break;
421     case ImmTySendMsg: OS << "SendMsg"; break;
422     }
423   }
424 
425   void print(raw_ostream &OS) const override {
426     switch (Kind) {
427     case Register:
428       OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
429       break;
430     case Immediate:
431       OS << '<' << getImm();
432       if (getImmTy() != ImmTyNone) {
433         OS << " type: "; printImmTy(OS, getImmTy());
434       }
435       OS << " mods: " << Imm.Modifiers << '>';
436       break;
437     case Token:
438       OS << '\'' << getToken() << '\'';
439       break;
440     case Expression:
441       OS << "<expr " << *Expr << '>';
442       break;
443     }
444   }
445 
446   static AMDGPUOperand::Ptr CreateImm(int64_t Val, SMLoc Loc,
447                                       enum ImmTy Type = ImmTyNone,
448                                       bool IsFPImm = false) {
449     auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
450     Op->Imm.Val = Val;
451     Op->Imm.IsFPImm = IsFPImm;
452     Op->Imm.Type = Type;
453     Op->Imm.Modifiers = 0;
454     Op->StartLoc = Loc;
455     Op->EndLoc = Loc;
456     return Op;
457   }
458 
459   static AMDGPUOperand::Ptr CreateToken(StringRef Str, SMLoc Loc,
460                                         bool HasExplicitEncodingSize = true) {
461     auto Res = llvm::make_unique<AMDGPUOperand>(Token);
462     Res->Tok.Data = Str.data();
463     Res->Tok.Length = Str.size();
464     Res->StartLoc = Loc;
465     Res->EndLoc = Loc;
466     return Res;
467   }
468 
469   static AMDGPUOperand::Ptr CreateReg(unsigned RegNo, SMLoc S,
470                                       SMLoc E,
471                                       const MCRegisterInfo *TRI,
472                                       const MCSubtargetInfo *STI,
473                                       bool ForceVOP3) {
474     auto Op = llvm::make_unique<AMDGPUOperand>(Register);
475     Op->Reg.RegNo = RegNo;
476     Op->Reg.TRI = TRI;
477     Op->Reg.STI = STI;
478     Op->Reg.Modifiers = 0;
479     Op->Reg.IsForcedVOP3 = ForceVOP3;
480     Op->StartLoc = S;
481     Op->EndLoc = E;
482     return Op;
483   }
484 
485   static AMDGPUOperand::Ptr CreateExpr(const class MCExpr *Expr, SMLoc S) {
486     auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
487     Op->Expr = Expr;
488     Op->StartLoc = S;
489     Op->EndLoc = S;
490     return Op;
491   }
492 
493   bool isSWaitCnt() const;
494   bool isHwreg() const;
495   bool isSendMsg() const;
496   bool isMubufOffset() const;
497   bool isSMRDOffset() const;
498   bool isSMRDLiteralOffset() const;
499   bool isDPPCtrl() const;
500 };
501 
/// Target assembly parser for the AMDGPU (SI/CI/VI) backend.
class AMDGPUAsmParser : public MCTargetAsmParser {
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  // 0 = no forced encoding; 32/64 force the e32/e64 encoding variant
  // (see checkTargetMatchPredicate / isForcedVOP3).
  unsigned ForcedEncodingSize;

  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  // SGPR102/SGPR103 are only addressable on pre-VI subtargets.
  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  // Directive and register-parsing helpers; see definitions below.
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool ParseSectionDirectiveHSAText();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();
  bool ParseDirectiveAMDGPUHsaModuleGlobal();
  bool ParseDirectiveAMDGPUHsaProgramGlobal();
  bool ParseSectionDirectiveHSADataGlobalAgent();
  bool ParseSectionDirectiveHSADataGlobalProgram();
  bool ParseSectionDirectiveHSARodataReadonlyAgent();
  bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum);
  bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth);

public:
  // Target-specific match results beyond the generic set.
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
               const MCInstrInfo &MII,
               const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0) {
    MCAsmParserExtension::Initialize(Parser);

    if (getSTI().getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  unsigned getForcedEncodingSize() const {
    return ForcedEncodingSize;
  }

  void setForcedEncodingSize(unsigned Size) {
    ForcedEncodingSize = Size;
  }

  bool isForcedVOP3() const {
    return ForcedEncodingSize == 64;
  }

  std::unique_ptr<AMDGPUOperand> parseRegister();
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  // Generic "<prefix>:<int>" / named-bit / optional-operand parsing helpers.
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                          int64_t Default = 0, bool AddDefault = false);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
                                          int64_t Default = 0, bool AddDefault = false,
                                          bool (*ConvertResult)(int64_t&) = 0);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy =
                                                      AMDGPUOperand::ImmTyNone,
                                     bool AddDefault = false);
  OperandMatchResultTy parseOptionalOps(
                                   const ArrayRef<OptionalOperand> &OptionalOps,
                                   OperandVector &Operands);
  OperandMatchResultTy parseStringWithPrefix(const char *Prefix, StringRef &Value);

  OperandMatchResultTy parseOptionalOperand(OperandVector &Operands, const OptionalOperand& Op, bool AddDefault);
  OperandMatchResultTy parseAMDGPUOperand(OperandVector &Operands, StringRef Name);

  // DS (local data share) instruction conversion.
  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);

  // s_waitcnt and s_sethwreg operand parsing.
  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  bool parseHwregOperand(int64_t &HwRegCode, int64_t &Offset, int64_t &Width, bool &IsIdentifier);
  OperandMatchResultTy parseHwreg(OperandVector &Operands);
private:
  // An operand value plus whether it was written symbolically
  // (used for s_sendmsg diagnostics).
  struct OperandInfoTy {
    int64_t Id;
    bool IsSymbolic;
    OperandInfoTy(int64_t Id_) : Id(Id_), IsSymbolic(false) { }
  };
  bool parseSendMsg(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
public:
  OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultHwreg() const;


  // MUBUF buffer instruction conversion and defaults.
  void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
  AMDGPUOperand::Ptr defaultMubufOffset() const;
  AMDGPUOperand::Ptr defaultGLC() const;
  AMDGPUOperand::Ptr defaultSLC() const;
  AMDGPUOperand::Ptr defaultTFE() const;

  // Thin wrappers routing each named optional operand through
  // parseAMDGPUOperand with its keyword.
  OperandMatchResultTy parseOModSI(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "omod"); }
  OperandMatchResultTy parseClampSI(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "clamp"); }
  OperandMatchResultTy parseSMRDOffset(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "smrd_offset"); }
  OperandMatchResultTy parseSMRDLiteralOffset(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "smrd_literal_offset"); }
  OperandMatchResultTy parseDPPCtrl(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "dpp_ctrl"); }
  OperandMatchResultTy parseRowMask(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "row_mask"); }
  OperandMatchResultTy parseBankMask(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "bank_mask"); }
  OperandMatchResultTy parseBoundCtrl(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "bound_ctrl"); }
  OperandMatchResultTy parseOffen(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "offen"); }
  OperandMatchResultTy parseIdxen(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "idxen"); }
  OperandMatchResultTy parseAddr64(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "addr64"); }
  OperandMatchResultTy parseOffset(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "offset"); }
  OperandMatchResultTy parseOffset0(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "offset0"); }
  OperandMatchResultTy parseOffset1(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "offset1"); }
  OperandMatchResultTy parseGLC(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "glc"); }
  OperandMatchResultTy parseSLC(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "slc"); }
  OperandMatchResultTy parseTFE(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "tfe"); }
  OperandMatchResultTy parseGDS(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "gds"); }

  // MIMG (image) operand parsing, defaults, and conversion.
  OperandMatchResultTy parseDMask(OperandVector &Operands);
  OperandMatchResultTy parseUNorm(OperandVector &Operands);
  OperandMatchResultTy parseDA(OperandVector &Operands);
  OperandMatchResultTy parseR128(OperandVector &Operands);
  OperandMatchResultTy parseLWE(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultDMask() const;
  AMDGPUOperand::Ptr defaultUNorm() const;
  AMDGPUOperand::Ptr defaultDA() const;
  AMDGPUOperand::Ptr defaultR128() const;
  AMDGPUOperand::Ptr defaultLWE() const;
  AMDGPUOperand::Ptr defaultSMRDOffset() const;
  AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;

  AMDGPUOperand::Ptr defaultClampSI() const;
  AMDGPUOperand::Ptr defaultOModSI() const;

  OperandMatchResultTy parseOModOperand(OperandVector &Operands);

  // VOP3 instruction conversion.
  void cvtId(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);

  void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);

  // DPP (data-parallel primitives) support.
  OperandMatchResultTy parseDPPCtrlOps(OperandVector &Operands, bool AddDefault);
  AMDGPUOperand::Ptr defaultRowMask() const;
  AMDGPUOperand::Ptr defaultBankMask() const;
  AMDGPUOperand::Ptr defaultBoundCtrl() const;
  void cvtDPP(MCInst &Inst, const OperandVector &Operands);

  // SDWA (sub-dword addressing) support.
  OperandMatchResultTy parseSDWASel(OperandVector &Operands);
  OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultSDWASel() const;
  AMDGPUOperand::Ptr defaultSDWADstUnused() const;
};
698 
// Table entry describing one optional named operand (see parseOptionalOps).
struct OptionalOperand {
  const char *Name;                // keyword as written in assembly
  AMDGPUOperand::ImmTy Type;       // immediate kind created on a match
  bool IsBit;                      // true if this is a bare flag operand
  int64_t Default;                 // value used when the operand is omitted
  bool (*ConvertResult)(int64_t&); // optional post-parse conversion; may be null
};
706 
707 }
708 
709 static int getRegClass(RegisterKind Is, unsigned RegWidth) {
710   if (Is == IS_VGPR) {
711     switch (RegWidth) {
712       default: return -1;
713       case 1: return AMDGPU::VGPR_32RegClassID;
714       case 2: return AMDGPU::VReg_64RegClassID;
715       case 3: return AMDGPU::VReg_96RegClassID;
716       case 4: return AMDGPU::VReg_128RegClassID;
717       case 8: return AMDGPU::VReg_256RegClassID;
718       case 16: return AMDGPU::VReg_512RegClassID;
719     }
720   } else if (Is == IS_TTMP) {
721     switch (RegWidth) {
722       default: return -1;
723       case 1: return AMDGPU::TTMP_32RegClassID;
724       case 2: return AMDGPU::TTMP_64RegClassID;
725       case 4: return AMDGPU::TTMP_128RegClassID;
726     }
727   } else if (Is == IS_SGPR) {
728     switch (RegWidth) {
729       default: return -1;
730       case 1: return AMDGPU::SGPR_32RegClassID;
731       case 2: return AMDGPU::SGPR_64RegClassID;
732       case 4: return AMDGPU::SGPR_128RegClassID;
733       case 8: return AMDGPU::SReg_256RegClassID;
734       case 16: return AMDGPU::SReg_512RegClassID;
735     }
736   }
737   return -1;
738 }
739 
740 static unsigned getSpecialRegForName(StringRef RegName) {
741   return StringSwitch<unsigned>(RegName)
742     .Case("exec", AMDGPU::EXEC)
743     .Case("vcc", AMDGPU::VCC)
744     .Case("flat_scratch", AMDGPU::FLAT_SCR)
745     .Case("m0", AMDGPU::M0)
746     .Case("scc", AMDGPU::SCC)
747     .Case("tba", AMDGPU::TBA)
748     .Case("tma", AMDGPU::TMA)
749     .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
750     .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
751     .Case("vcc_lo", AMDGPU::VCC_LO)
752     .Case("vcc_hi", AMDGPU::VCC_HI)
753     .Case("exec_lo", AMDGPU::EXEC_LO)
754     .Case("exec_hi", AMDGPU::EXEC_HI)
755     .Case("tma_lo", AMDGPU::TMA_LO)
756     .Case("tma_hi", AMDGPU::TMA_HI)
757     .Case("tba_lo", AMDGPU::TBA_LO)
758     .Case("tba_hi", AMDGPU::TBA_HI)
759     .Default(0);
760 }
761 
762 bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
763   auto R = parseRegister();
764   if (!R) return true;
765   assert(R->isReg());
766   RegNo = R->getReg();
767   StartLoc = R->getStartLoc();
768   EndLoc = R->getEndLoc();
769   return false;
770 }
771 
772 bool AMDGPUAsmParser::AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum)
773 {
774   switch (RegKind) {
775   case IS_SPECIAL:
776     if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) { Reg = AMDGPU::EXEC; RegWidth = 2; return true; }
777     if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) { Reg = AMDGPU::FLAT_SCR; RegWidth = 2; return true; }
778     if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) { Reg = AMDGPU::VCC; RegWidth = 2; return true; }
779     if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) { Reg = AMDGPU::TBA; RegWidth = 2; return true; }
780     if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) { Reg = AMDGPU::TMA; RegWidth = 2; return true; }
781     return false;
782   case IS_VGPR:
783   case IS_SGPR:
784   case IS_TTMP:
785     if (Reg1 != Reg + RegWidth) { return false; }
786     RegWidth++;
787     return true;
788   default:
789     assert(false); return false;
790   }
791 }
792 
// Parses one register reference from the token stream into (RegKind, Reg,
// RegNum, RegWidth). Accepted forms:
//   - special names:      exec, vcc, m0, flat_scratch, ... (and lo/hi halves)
//   - single registers:   vNN, sNN, ttmpNN
//   - ranges:             v[LO:HI], s[LO:HI], ttmp[LO:HI]
//   - explicit lists:     [s0, s1, s2, s3]  (consecutive 32-bit registers)
// Returns true on success; on failure returns false with the outputs in an
// unspecified state (tokens consumed so far are not restored).
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth)
{
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      // Numbered register: determine the kind from the name prefix, then
      // parse either a single index or a [LO:HI] range.
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') { RegNumIndex = 1; RegKind = IS_VGPR; }
      else if (RegName[0] == 's') { RegNumIndex = 1; RegKind = IS_SGPR; }
      else if (RegName.startswith("ttmp")) { RegNumIndex = strlen("ttmp"); RegKind = IS_TTMP; }
      else { return false; }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum)) { return false; }
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY].
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac)) { return false; }
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo)) { return false; }

        if (getLexer().isNot(AsmToken::Colon)) { return false; }
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegHi)) { return false; }

        if (getLexer().isNot(AsmToken::RBrac)) { return false; }
        Parser.Lex();

        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    // The first element is parsed recursively, then each subsequent element
    // is merged in via AddNextRegisterToList; all must be width-1 registers
    // of the same kind.
    Parser.Lex();
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) { return false; }
    if (RegWidth != 1) { return false; }
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1)) {
        if (RegWidth1 != 1) { return false; }
        if (RegKind1 != RegKind) { return false; }
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) { return false; }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  // Translate (kind, index, width) into an actual register of the matching
  // register class and validate it against the subtarget.
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
    // Reject misaligned starts, then convert the dword index into an index
    // within the (aligned) register class.
    if (RegNum % Size != 0) { return false; }
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1) { return false; }
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs()) { return false; }
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    assert(false); return false;
  }

  if (!subtargetHasRegister(*TRI, Reg)) { return false; }
  return true;
}
888 
889 std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
890   const auto &Tok = Parser.getTok();
891   SMLoc StartLoc = Tok.getLoc();
892   SMLoc EndLoc = Tok.getEndLoc();
893   const MCRegisterInfo *TRI = getContext().getRegisterInfo();
894 
895   RegisterKind RegKind;
896   unsigned Reg, RegNum, RegWidth;
897 
898   if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) {
899     return nullptr;
900   }
901   return AMDGPUOperand::CreateReg(Reg, StartLoc, EndLoc,
902                                   TRI, &getSTI(), false);
903 }
904 
905 unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
906 
907   uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
908 
909   if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
910       (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
911     return Match_InvalidOperand;
912 
913   if ((TSFlags & SIInstrFlags::VOP3) &&
914       (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
915       getForcedEncodingSize() != 64)
916     return Match_PreferE32;
917 
918   return Match_Success;
919 }
920 
921 
/// Match the parsed Operands against the instruction table and, on success,
/// emit the resulting MCInst to \p Out. Returns true (with a diagnostic at
/// \p IDLoc) on failure, false on success.
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
    default: break;
    case Match_Success:
      Inst.setLoc(IDLoc);
      Out.EmitInstruction(Inst, getSTI());
      return false;
    case Match_MissingFeature:
      // The mnemonic exists but requires subtarget features not enabled here.
      return Error(IDLoc, "instruction not supported on this GPU");

    case Match_MnemonicFail:
      return Error(IDLoc, "unrecognized instruction mnemonic");

    case Match_InvalidOperand: {
      // When valid, ErrorInfo is the index of the offending operand; point
      // the diagnostic at it if we have a usable location.
      SMLoc ErrorLoc = IDLoc;
      if (ErrorInfo != ~0ULL) {
        if (ErrorInfo >= Operands.size()) {
          return Error(IDLoc, "too few operands for instruction");
        }
        ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
        if (ErrorLoc == SMLoc())
          ErrorLoc = IDLoc;
      }
      return Error(ErrorLoc, "invalid operand for instruction");
    }
    case Match_PreferE32:
      // Produced by checkTargetMatchPredicate for VOPAsmPrefer32Bit opcodes
      // matched in e64 form without an explicit _e64 suffix.
      return Error(IDLoc, "internal error: instruction without _e64 suffix "
                          "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}
959 
960 bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
961                                                uint32_t &Minor) {
962   if (getLexer().isNot(AsmToken::Integer))
963     return TokError("invalid major version");
964 
965   Major = getLexer().getTok().getIntVal();
966   Lex();
967 
968   if (getLexer().isNot(AsmToken::Comma))
969     return TokError("minor version number required, comma expected");
970   Lex();
971 
972   if (getLexer().isNot(AsmToken::Integer))
973     return TokError("invalid minor version");
974 
975   Minor = getLexer().getTok().getIntVal();
976   Lex();
977 
978   return false;
979 }
980 
981 bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
982 
983   uint32_t Major;
984   uint32_t Minor;
985 
986   if (ParseDirectiveMajorMinor(Major, Minor))
987     return true;
988 
989   getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
990   return false;
991 }
992 
/// .hsa_code_object_isa [<major>, <minor>, <stepping>, "<vendor>", "<arch>"]
/// With no arguments the ISA version is derived from the target's feature
/// bits. Returns true (with a diagnostic) on malformed input.
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {

  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }


  // Otherwise every field is mandatory, comma-separated, in order:
  // major, minor, stepping, vendor string, arch string.
  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
1049 
1050 bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
1051                                                amd_kernel_code_t &Header) {
1052   SmallString<40> ErrStr;
1053   raw_svector_ostream Err(ErrStr);
1054   if (!parseAmdKernelCodeField(ID, getLexer(), Header, Err)) {
1055     return TokError(Err.str());
1056   }
1057   Lex();
1058   return false;
1059 }
1060 
/// .amd_kernel_code_t ... .end_amd_kernel_code_t
/// Parses one field assignment per line into a default-initialized header,
/// then emits the completed amd_kernel_code_t via the target streamer.
bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {

  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {

    // Each field (and the terminator) must start on its own line.
    if (getLexer().isNot(AsmToken::EndOfStatement))
      return TokError("amd_kernel_code_t values must begin on a new line");

    // Lex EndOfStatement.  This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while(getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}
1093 
1094 bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
1095   getParser().getStreamer().SwitchSection(
1096       AMDGPU::getHSATextSection(getContext()));
1097   return false;
1098 }
1099 
1100 bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
1101   if (getLexer().isNot(AsmToken::Identifier))
1102     return TokError("expected symbol name");
1103 
1104   StringRef KernelName = Parser.getTok().getString();
1105 
1106   getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
1107                                            ELF::STT_AMDGPU_HSA_KERNEL);
1108   Lex();
1109   return false;
1110 }
1111 
1112 bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
1113   if (getLexer().isNot(AsmToken::Identifier))
1114     return TokError("expected symbol name");
1115 
1116   StringRef GlobalName = Parser.getTok().getIdentifier();
1117 
1118   getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
1119   Lex();
1120   return false;
1121 }
1122 
1123 bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
1124   if (getLexer().isNot(AsmToken::Identifier))
1125     return TokError("expected symbol name");
1126 
1127   StringRef GlobalName = Parser.getTok().getIdentifier();
1128 
1129   getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
1130   Lex();
1131   return false;
1132 }
1133 
1134 bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
1135   getParser().getStreamer().SwitchSection(
1136       AMDGPU::getHSADataGlobalAgentSection(getContext()));
1137   return false;
1138 }
1139 
1140 bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
1141   getParser().getStreamer().SwitchSection(
1142       AMDGPU::getHSADataGlobalProgramSection(getContext()));
1143   return false;
1144 }
1145 
1146 bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
1147   getParser().getStreamer().SwitchSection(
1148       AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
1149   return false;
1150 }
1151 
1152 bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
1153   StringRef IDVal = DirectiveID.getString();
1154 
1155   if (IDVal == ".hsa_code_object_version")
1156     return ParseDirectiveHSACodeObjectVersion();
1157 
1158   if (IDVal == ".hsa_code_object_isa")
1159     return ParseDirectiveHSACodeObjectISA();
1160 
1161   if (IDVal == ".amd_kernel_code_t")
1162     return ParseDirectiveAMDKernelCodeT();
1163 
1164   if (IDVal == ".hsatext")
1165     return ParseSectionDirectiveHSAText();
1166 
1167   if (IDVal == ".amdgpu_hsa_kernel")
1168     return ParseDirectiveAMDGPUHsaKernel();
1169 
1170   if (IDVal == ".amdgpu_hsa_module_global")
1171     return ParseDirectiveAMDGPUHsaModuleGlobal();
1172 
1173   if (IDVal == ".amdgpu_hsa_program_global")
1174     return ParseDirectiveAMDGPUHsaProgramGlobal();
1175 
1176   if (IDVal == ".hsadata_global_agent")
1177     return ParseSectionDirectiveHSADataGlobalAgent();
1178 
1179   if (IDVal == ".hsadata_global_program")
1180     return ParseSectionDirectiveHSADataGlobalProgram();
1181 
1182   if (IDVal == ".hsarodata_readonly_agent")
1183     return ParseSectionDirectiveHSARodataReadonlyAgent();
1184 
1185   return true;
1186 }
1187 
1188 bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
1189                                            unsigned RegNo) const {
1190   if (isCI())
1191     return true;
1192 
1193   if (isSI()) {
1194     // No flat_scr
1195     switch (RegNo) {
1196     case AMDGPU::FLAT_SCR:
1197     case AMDGPU::FLAT_SCR_LO:
1198     case AMDGPU::FLAT_SCR_HI:
1199       return false;
1200     default:
1201       return true;
1202     }
1203   }
1204 
1205   // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
1206   // SI/CI have.
1207   for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
1208        R.isValid(); ++R) {
1209     if (*R == RegNo)
1210       return false;
1211   }
1212 
1213   return true;
1214 }
1215 
/// Parse a single instruction operand, handling the optional unary minus,
/// "abs(...)" and |...| source-modifier wrappers around register operands.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list.  This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  // Source modifiers that may prefix the operand:
  //   Negate: leading '-';  Abs: '|reg|';  Abs2: 'abs(reg)'.
  bool Negate = false, Abs = false, Abs2 = false;

  if (getLexer().getKind()== AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    Parser.Lex();
    Abs = true;
  }

  switch(getLexer().getKind()) {
    case AsmToken::Integer: {
      SMLoc S = Parser.getTok().getLoc();
      int64_t IntVal;
      if (getParser().parseAbsoluteExpression(IntVal))
        return MatchOperand_ParseFail;
      // Immediates must fit in 32 bits, signed or unsigned.
      if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
        Error(S, "invalid immediate: only 32-bit values are legal");
        return MatchOperand_ParseFail;
      }

      if (Negate)
        IntVal *= -1;
      Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
      return MatchOperand_Success;
    }
    case AsmToken::Real: {
      // FIXME: We should emit an error if a double precision floating-point
      // value is used.  I'm not sure the best way to detect this.
      SMLoc S = Parser.getTok().getLoc();
      int64_t IntVal;
      if (getParser().parseAbsoluteExpression(IntVal))
        return MatchOperand_ParseFail;

      // IntVal holds the bit pattern of a double; store the equivalent
      // single-precision bit pattern as the immediate.
      APFloat F((float)BitsToDouble(IntVal));
      if (Negate)
        F.changeSign();
      Operands.push_back(
          AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
      return MatchOperand_Success;
    }
    case AsmToken::LBrac:
    case AsmToken::Identifier: {
      if (auto R = parseRegister()) {
        // Modifier bits: 0x1 = negate, 0x2 = abs (either spelling).
        unsigned Modifiers = 0;

        if (Negate)
          Modifiers |= 0x1;

        if (Abs) {
          // A '|' prefix requires a matching closing '|'.
          if (getLexer().getKind() != AsmToken::Pipe)
            return MatchOperand_ParseFail;
          Parser.Lex();
          Modifiers |= 0x2;
        }
        if (Abs2) {
          // abs( requires a matching closing ')'.
          if (getLexer().isNot(AsmToken::RParen)) {
            return MatchOperand_ParseFail;
          }
          Parser.Lex();
          Modifiers |= 0x2;
        }
        assert(R->isReg());
        R->Reg.IsForcedVOP3 = isForcedVOP3();
        if (Modifiers) {
          R->setModifiers(Modifiers);
        }
        Operands.push_back(std::move(R));
      } else {
        if (ResTy == MatchOperand_NoMatch) {
          // Not a register: keep the raw token (plus an optional trailing
          // "ident:ident" pair) so the matcher can diagnose it precisely.
          const auto &Tok = Parser.getTok();
          Operands.push_back(AMDGPUOperand::CreateToken(Tok.getString(),
                                                        Tok.getLoc()));
          Parser.Lex();
          if (getLexer().is(AsmToken::Colon)) {
            Parser.Lex();
            if (getLexer().is(AsmToken::Identifier)) {
              Parser.Lex();
            }
          }
        } else {
          return ResTy;
        }
      }
      return MatchOperand_Success;
    }
    default:
      return MatchOperand_NoMatch;
  }
}
1334 
/// Parse one instruction statement: record any _e32/_e64 encoding suffix,
/// push the mnemonic token, then parse operands until end of statement.
bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {

  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));


  // Strip the suffix so custom operand parsers see the bare mnemonic; the
  // token pushed above keeps the full spelling for matching.
  if (Name.endswith("_e64")) { Name = Name.substr(0, Name.size() - 4); }
  if (Name.endswith("_e32")) { Name = Name.substr(0, Name.size() - 4); }

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    // Note: the separator above is consumed before Res is examined, so the
    // error location reported below is past the failed operand.
    switch (Res) {
      case MatchOperand_Success: break;
      case MatchOperand_ParseFail: return Error(getLexer().getLoc(),
                                                "failed parsing operand.");
      case MatchOperand_NoMatch: return Error(getLexer().getLoc(),
                                              "not a valid operand.");
    }
  }

  return false;
}
1372 
1373 //===----------------------------------------------------------------------===//
1374 // Utility functions
1375 //===----------------------------------------------------------------------===//
1376 
1377 AMDGPUAsmParser::OperandMatchResultTy
1378 AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
1379                                     int64_t Default, bool AddDefault) {
1380   // We are at the end of the statement, and this is a default argument, so
1381   // use a default value.
1382   if (getLexer().is(AsmToken::EndOfStatement)) {
1383     Int = Default;
1384     return MatchOperand_Success;
1385   }
1386 
1387   switch(getLexer().getKind()) {
1388     default: return MatchOperand_NoMatch;
1389     case AsmToken::Identifier: {
1390       StringRef Name = Parser.getTok().getString();
1391       if (!Name.equals(Prefix)) {
1392         if (AddDefault) {
1393           Int = Default;
1394           return MatchOperand_Success;
1395         }
1396         return MatchOperand_NoMatch;
1397       }
1398 
1399       Parser.Lex();
1400       if (getLexer().isNot(AsmToken::Colon))
1401         return MatchOperand_ParseFail;
1402 
1403       Parser.Lex();
1404       if (getLexer().isNot(AsmToken::Integer))
1405         return MatchOperand_ParseFail;
1406 
1407       if (getParser().parseAbsoluteExpression(Int))
1408         return MatchOperand_ParseFail;
1409       break;
1410     }
1411   }
1412   return MatchOperand_Success;
1413 }
1414 
1415 AMDGPUAsmParser::OperandMatchResultTy
1416 AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
1417                                     enum AMDGPUOperand::ImmTy ImmTy,
1418                                     int64_t Default, bool AddDefault,
1419                                     bool (*ConvertResult)(int64_t&)) {
1420 
1421   SMLoc S = Parser.getTok().getLoc();
1422   int64_t Value = 0;
1423 
1424   AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value, Default, AddDefault);
1425   if (Res != MatchOperand_Success)
1426     return Res;
1427 
1428   if (ConvertResult && !ConvertResult(Value)) {
1429     return MatchOperand_ParseFail;
1430   }
1431 
1432   Operands.push_back(AMDGPUOperand::CreateImm(Value, S, ImmTy));
1433   return MatchOperand_Success;
1434 }
1435 
1436 AMDGPUAsmParser::OperandMatchResultTy
1437 AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
1438                                enum AMDGPUOperand::ImmTy ImmTy,
1439                                      bool AddDefault) {
1440   int64_t Bit = 0;
1441   SMLoc S = Parser.getTok().getLoc();
1442 
1443   // We are at the end of the statement, and this is a default argument, so
1444   // use a default value.
1445   if (getLexer().isNot(AsmToken::EndOfStatement)) {
1446     switch(getLexer().getKind()) {
1447       case AsmToken::Identifier: {
1448         StringRef Tok = Parser.getTok().getString();
1449         if (Tok == Name) {
1450           Bit = 1;
1451           Parser.Lex();
1452         } else if (Tok.startswith("no") && Tok.endswith(Name)) {
1453           Bit = 0;
1454           Parser.Lex();
1455         } else {
1456           if (AddDefault) {
1457             Bit = 0;
1458           } else {
1459             return MatchOperand_NoMatch;
1460           }
1461         }
1462         break;
1463       }
1464       default:
1465         return MatchOperand_NoMatch;
1466     }
1467   }
1468 
1469   Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
1470   return MatchOperand_Success;
1471 }
1472 
// Maps an optional-immediate type to its index in the parsed Operands list.
typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;

// Append the optional immediate of type \p ImmT to \p Inst: use the parsed
// operand recorded in \p OptionalIdx when present, otherwise \p Default.
void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
                           OptionalImmIndexMap& OptionalIdx,
                           enum AMDGPUOperand::ImmTy ImmT, int64_t Default = 0) {
  auto i = OptionalIdx.find(ImmT);
  if (i != OptionalIdx.end()) {
    unsigned Idx = i->second;
    ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
  } else {
    Inst.addOperand(MCOperand::createImm(Default));
  }
}
1486 
1487 static bool operandsHasOptionalOp(const OperandVector &Operands,
1488                                   const OptionalOperand &OOp) {
1489   for (unsigned i = 0; i < Operands.size(); i++) {
1490     const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
1491     if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
1492         (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
1493       return true;
1494 
1495   }
1496   return false;
1497 }
1498 
/// Try each candidate in \p OptionalOps against the current token stream,
/// skipping ops already present in \p Operands. A value equal to the op's
/// default is consumed but not pushed (the matcher supplies defaults).
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
                                   OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  for (const OptionalOperand &Op : OptionalOps) {
    // Each optional operand may appear at most once.
    if (operandsHasOptionalOp(Operands, Op))
      continue;
    AMDGPUAsmParser::OperandMatchResultTy Res;
    int64_t Value;
    if (Op.IsBit) {
      Res = parseNamedBit(Op.Name, Operands, Op.Type);
      if (Res == MatchOperand_NoMatch)
        continue;
      return Res;
    }

    Res = parseIntWithPrefix(Op.Name, Value, Op.Default);

    if (Res == MatchOperand_NoMatch)
      continue;

    if (Res != MatchOperand_Success)
      return Res;

    // Compare against the default before conversion: ConvertResult may
    // rewrite Value (e.g. re-encode it), but defaulting is decided on the
    // value as written by the user.
    bool DefaultValue = (Value == Op.Default);

    if (Op.ConvertResult && !Op.ConvertResult(Value)) {
      return MatchOperand_ParseFail;
    }

    if (!DefaultValue) {
      Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
    }
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}
1536 
1537 AMDGPUAsmParser::OperandMatchResultTy
1538 AMDGPUAsmParser::parseStringWithPrefix(const char *Prefix, StringRef &Value) {
1539   if (getLexer().isNot(AsmToken::Identifier)) {
1540     return MatchOperand_NoMatch;
1541   }
1542   StringRef Tok = Parser.getTok().getString();
1543   if (Tok != Prefix) {
1544     return MatchOperand_NoMatch;
1545   }
1546 
1547   Parser.Lex();
1548   if (getLexer().isNot(AsmToken::Colon)) {
1549     return MatchOperand_ParseFail;
1550   }
1551 
1552   Parser.Lex();
1553   if (getLexer().isNot(AsmToken::Identifier)) {
1554     return MatchOperand_ParseFail;
1555   }
1556 
1557   Value = Parser.getTok().getString();
1558   return MatchOperand_Success;
1559 }
1560 
1561 //===----------------------------------------------------------------------===//
1562 // ds
1563 //===----------------------------------------------------------------------===//
1564 
1565 void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
1566                                     const OperandVector &Operands) {
1567 
1568   OptionalImmIndexMap OptionalIdx;
1569 
1570   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1571     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1572 
1573     // Add the register arguments
1574     if (Op.isReg()) {
1575       Op.addRegOperands(Inst, 1);
1576       continue;
1577     }
1578 
1579     // Handle optional arguments
1580     OptionalIdx[Op.getImmTy()] = i;
1581   }
1582 
1583   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
1584   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
1585   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
1586 
1587   Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1588 }
1589 
1590 void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
1591 
1592   std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1593   bool GDSOnly = false;
1594 
1595   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1596     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1597 
1598     // Add the register arguments
1599     if (Op.isReg()) {
1600       Op.addRegOperands(Inst, 1);
1601       continue;
1602     }
1603 
1604     if (Op.isToken() && Op.getToken() == "gds") {
1605       GDSOnly = true;
1606       continue;
1607     }
1608 
1609     // Handle optional arguments
1610     OptionalIdx[Op.getImmTy()] = i;
1611   }
1612 
1613   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
1614   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
1615 
1616   if (!GDSOnly) {
1617     addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
1618   }
1619   Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1620 }
1621 
1622 
1623 //===----------------------------------------------------------------------===//
1624 // s_waitcnt
1625 //===----------------------------------------------------------------------===//
1626 
1627 bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
1628   StringRef CntName = Parser.getTok().getString();
1629   int64_t CntVal;
1630 
1631   Parser.Lex();
1632   if (getLexer().isNot(AsmToken::LParen))
1633     return true;
1634 
1635   Parser.Lex();
1636   if (getLexer().isNot(AsmToken::Integer))
1637     return true;
1638 
1639   if (getParser().parseAbsoluteExpression(CntVal))
1640     return true;
1641 
1642   if (getLexer().isNot(AsmToken::RParen))
1643     return true;
1644 
1645   Parser.Lex();
1646   if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
1647     Parser.Lex();
1648 
1649   int CntShift;
1650   int CntMask;
1651 
1652   if (CntName == "vmcnt") {
1653     CntMask = 0xf;
1654     CntShift = 0;
1655   } else if (CntName == "expcnt") {
1656     CntMask = 0x7;
1657     CntShift = 4;
1658   } else if (CntName == "lgkmcnt") {
1659     CntMask = 0xf;
1660     CntShift = 8;
1661   } else {
1662     return true;
1663   }
1664 
1665   IntVal &= ~(CntMask << CntShift);
1666   IntVal |= (CntVal << CntShift);
1667   return false;
1668 }
1669 
1670 AMDGPUAsmParser::OperandMatchResultTy
1671 AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
1672   // Disable all counters by default.
1673   // vmcnt   [3:0]
1674   // expcnt  [6:4]
1675   // lgkmcnt [11:8]
1676   int64_t CntVal = 0xf7f;
1677   SMLoc S = Parser.getTok().getLoc();
1678 
1679   switch(getLexer().getKind()) {
1680     default: return MatchOperand_ParseFail;
1681     case AsmToken::Integer:
1682       // The operand can be an integer value.
1683       if (getParser().parseAbsoluteExpression(CntVal))
1684         return MatchOperand_ParseFail;
1685       break;
1686 
1687     case AsmToken::Identifier:
1688       do {
1689         if (parseCnt(CntVal))
1690           return MatchOperand_ParseFail;
1691       } while(getLexer().isNot(AsmToken::EndOfStatement));
1692       break;
1693   }
1694   Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
1695   return MatchOperand_Success;
1696 }
1697 
/// Parse the "hwreg(<reg>[, <offset>, <width>])" syntax. <reg> is either a
/// symbolic HW_REG_* name or an integer code; offset/width are optional but
/// must be given together. Returns true on any syntax error; range checking
/// of the parsed values is the caller's job (see parseHwreg).
bool AMDGPUAsmParser::parseHwregOperand(int64_t &HwRegCode, int64_t &Offset, int64_t &Width, bool &IsIdentifier) {
  if (Parser.getTok().getString() != "hwreg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    IsIdentifier = true;
    // Unknown names hit Default(-1); through the unsigned StringSwitch this
    // assigns UINT_MAX to HwRegCode, which still fails the caller's 0..63
    // range check as intended.
    HwRegCode = StringSwitch<unsigned>(Parser.getTok().getString())
      .Case("HW_REG_MODE"     , 1)
      .Case("HW_REG_STATUS"   , 2)
      .Case("HW_REG_TRAPSTS"  , 3)
      .Case("HW_REG_HW_ID"    , 4)
      .Case("HW_REG_GPR_ALLOC", 5)
      .Case("HW_REG_LDS_ALLOC", 6)
      .Case("HW_REG_IB_STS"   , 7)
      .Default(-1);
    Parser.Lex();
  } else {
    IsIdentifier = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(HwRegCode))
      return true;
  }

  // Short form: hwreg(<reg>) — offset/width keep their caller defaults.
  if (getLexer().is(AsmToken::RParen)) {
    Parser.Lex();
    return false;
  }

  // optional params
  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Offset))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Width))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();

  return false;
}
1757 
/// Parse an s_getreg/s_setreg operand: either a raw 16-bit immediate or the
/// hwreg(...) form. Out-of-range values are diagnosed but parsing continues,
/// so a single bad operand does not cascade into follow-on errors.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
    default: return MatchOperand_ParseFail;
    case AsmToken::Integer:
      // The operand can be an integer value.
      if (getParser().parseAbsoluteExpression(Imm16Val))
        return MatchOperand_ParseFail;
      if (!isInt<16>(Imm16Val) && !isUInt<16>(Imm16Val)) {
        Error(S, "invalid immediate: only 16-bit values are legal");
        // Do not return error code, but create an imm operand anyway and proceed
        // to the next operand, if any. That avoids unnecessary error messages.
      }
      break;

    case AsmToken::Identifier: {
        bool IsIdentifier = false;
        int64_t HwRegCode = -1;
        int64_t Offset = 0; // default
        int64_t Width = 32; // default
        if (parseHwregOperand(HwRegCode, Offset, Width, IsIdentifier))
          return MatchOperand_ParseFail;
        // Encoding of the 16-bit simm16 field:
        // HwRegCode (6) [5:0]
        // Offset (5) [10:6]
        // WidthMinusOne (5) [15:11]
        if (HwRegCode < 0 || HwRegCode > 63) {
          if (IsIdentifier)
            Error(S, "invalid symbolic name of hardware register");
          else
            Error(S, "invalid code of hardware register: only 6-bit values are legal");
        }
        if (Offset < 0 || Offset > 31)
          Error(S, "invalid bit offset: only 5-bit values are legal");
        if (Width < 1 || Width > 32)
          Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
        Imm16Val = HwRegCode | (Offset << 6) | ((Width-1) << 11);
      }
      break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}
1803 
// Any immediate can serve as an s_waitcnt operand (raw mask form).
bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}
1807 
// True only for immediates produced by the hwreg(...) parser.
bool AMDGPUOperand::isHwreg() const {
  return isImmTy(ImmTyHwreg);
}
1811 
// Default hwreg operand (value 0) used when the operand was omitted.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultHwreg() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyHwreg);
}
1815 
bool AMDGPUAsmParser::parseSendMsg(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
  using namespace llvm::AMDGPU::SendMsg;

  // Parse the "sendmsg(MSG[, OP[, STREAM]])" syntax, filling in the raw
  // message id, operation id and stream id.  Returns true on a syntax
  // error; semantic validation of the parsed values is left to the caller
  // (parseSendMsgOp).

  // Expect the literal "sendmsg" followed by '('.
  if (Parser.getTok().getString() != "sendmsg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  // Message id: either a symbolic name from IdSymbolic or an integer.
  if (getLexer().is(AsmToken::Identifier)) {
    Msg.IsSymbolic = true;
    Msg.Id = ID_UNKNOWN_;
    const std::string tok = Parser.getTok().getString();
    // Scan only the defined enum values; IdSymbolic has nullptr gaps.
    for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
      switch(i) {
        default: continue; // Omit gaps.
        case ID_INTERRUPT: case ID_GS: case ID_GS_DONE:  case ID_SYSMSG: break;
      }
      if (tok == IdSymbolic[i]) {
        Msg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Msg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Msg.Id))
      return true;
    // NOTE(review): unclear when a second integer token can follow a
    // successfully parsed expression; on a second parse failure the id is
    // demoted to unknown rather than erroring out — confirm intent.
    if (getLexer().is(AsmToken::Integer))
      if (getParser().parseAbsoluteExpression(Msg.Id))
        Msg.Id = ID_UNKNOWN_;
  }
  if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
    return false;

  // Only GS, GS_DONE and SYSMSG carry an operation id; any other message
  // must be followed immediately by ')'.
  if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
    if (getLexer().isNot(AsmToken::RParen))
      return true;
    Parser.Lex();
    return false;
  }

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  // Operation id: symbolic (the lookup table depends on the message id)
  // or an integer expression.
  assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
  Operation.Id = ID_UNKNOWN_;
  if (getLexer().is(AsmToken::Identifier)) {
    Operation.IsSymbolic = true;
    const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
    const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
    const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
    const std::string Tok = Parser.getTok().getString();
    for (int i = F; i < L; ++i) {
      if (Tok == S[i]) {
        Operation.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Operation.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Operation.Id))
      return true;
  }

  if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
    // Stream id is optional.
    if (getLexer().is(AsmToken::RParen)) {
      Parser.Lex();
      return false;
    }

    if (getLexer().isNot(AsmToken::Comma))
      return true;
    Parser.Lex();

    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(StreamId))
      return true;
  }

  // Closing ')'.
  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();
  return false;
}
1911 
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SendMsg;

  // Parse an s_sendmsg operand: either a bare 16-bit immediate, or the
  // symbolic sendmsg(...) form which parseSendMsg() splits into message,
  // operation and stream ids that are validated and packed here.
  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default:
    return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (!isInt<16>(Imm16Val) && !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unnecessary error messages.
    }
    break;
  case AsmToken::Identifier: {
      OperandInfoTy Msg(ID_UNKNOWN_);
      OperandInfoTy Operation(OP_UNKNOWN_);
      int64_t StreamId = STREAM_ID_DEFAULT;
      if (parseSendMsg(Msg, Operation, StreamId))
        return MatchOperand_NoMatch;
      // The do {} while (0) lets validation bail out with 'break' while
      // still emitting an (possibly partial) operand below, mirroring the
      // error recovery of the integer case above.
      do {
        // Validate and encode message ID.
        if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
                || Msg.Id == ID_SYSMSG)) {
          if (Msg.IsSymbolic)
            Error(S, "invalid/unsupported symbolic name of message");
          else
            Error(S, "invalid/unsupported code of message");
          break;
        }
        Imm16Val = Msg.Id;
        // Validate and encode operation ID.
        if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
          if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
            if (Operation.IsSymbolic)
              Error(S, "invalid symbolic name of GS_OP");
            else
              Error(S, "invalid code of GS_OP: only 2-bit values are legal");
            break;
          }
          // NOP is only meaningful together with GS_DONE.
          if (Operation.Id == OP_GS_NOP
              && Msg.Id != ID_GS_DONE) {
            Error(S, "invalid GS_OP: NOP is for GS_DONE only");
            break;
          }
          Imm16Val |= (Operation.Id << OP_SHIFT_);
        }
        if (Msg.Id == ID_SYSMSG) {
          if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
            if (Operation.IsSymbolic)
              Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
            else
              Error(S, "invalid/unsupported code of SYSMSG_OP");
            break;
          }
          Imm16Val |= (Operation.Id << OP_SHIFT_);
        }
        // Validate and encode stream ID.
        if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
          if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
            Error(S, "invalid stream id: only 2-bit values are legal");
            break;
          }
          Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
        }
      } while (0);
    }
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
  return MatchOperand_Success;
}
1990 
1991 bool AMDGPUOperand::isSendMsg() const {
1992   return isImmTy(ImmTySendMsg);
1993 }
1994 
1995 //===----------------------------------------------------------------------===//
1996 // sopp branch targets
1997 //===----------------------------------------------------------------------===//
1998 
1999 AMDGPUAsmParser::OperandMatchResultTy
2000 AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
2001   SMLoc S = Parser.getTok().getLoc();
2002 
2003   switch (getLexer().getKind()) {
2004     default: return MatchOperand_ParseFail;
2005     case AsmToken::Integer: {
2006       int64_t Imm;
2007       if (getParser().parseAbsoluteExpression(Imm))
2008         return MatchOperand_ParseFail;
2009       Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
2010       return MatchOperand_Success;
2011     }
2012 
2013     case AsmToken::Identifier:
2014       Operands.push_back(AMDGPUOperand::CreateExpr(
2015           MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
2016                                   Parser.getTok().getString()), getContext()), S));
2017       Parser.Lex();
2018       return MatchOperand_Success;
2019   }
2020 }
2021 
2022 //===----------------------------------------------------------------------===//
2023 // flat
2024 //===----------------------------------------------------------------------===//
2025 
2026 //===----------------------------------------------------------------------===//
2027 // mubuf
2028 //===----------------------------------------------------------------------===//
2029 
2030 bool AMDGPUOperand::isMubufOffset() const {
2031   return isImmTy(ImmTyOffset) && isUInt<12>(getImm());
2032 }
2033 
2034 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultMubufOffset() const {
2035   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
2036 }
2037 
2038 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
2039   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyGLC);
2040 }
2041 
2042 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
2043   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySLC);
2044 }
2045 
2046 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
2047   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyTFE);
2048 }
2049 
2050 void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
2051                                const OperandVector &Operands) {
2052   OptionalImmIndexMap OptionalIdx;
2053 
2054   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
2055     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
2056 
2057     // Add the register arguments
2058     if (Op.isReg()) {
2059       Op.addRegOperands(Inst, 1);
2060       continue;
2061     }
2062 
2063     // Handle the case where soffset is an immediate
2064     if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
2065       Op.addImmOperands(Inst, 1);
2066       continue;
2067     }
2068 
2069     // Handle tokens like 'offen' which are sometimes hard-coded into the
2070     // asm string.  There are no MCInst operands for these.
2071     if (Op.isToken()) {
2072       continue;
2073     }
2074     assert(Op.isImm());
2075 
2076     // Handle optional arguments
2077     OptionalIdx[Op.getImmTy()] = i;
2078   }
2079 
2080   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
2081   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2082   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2083   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
2084 }
2085 
2086 //===----------------------------------------------------------------------===//
2087 // mimg
2088 //===----------------------------------------------------------------------===//
2089 
2090 AMDGPUAsmParser::OperandMatchResultTy
2091 AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
2092   return parseIntWithPrefix("dmask", Operands, AMDGPUOperand::ImmTyDMask);
2093 }
2094 
2095 AMDGPUAsmParser::OperandMatchResultTy
2096 AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
2097   return parseNamedBit("unorm", Operands, AMDGPUOperand::ImmTyUNorm);
2098 }
2099 
2100 AMDGPUAsmParser::OperandMatchResultTy
2101 AMDGPUAsmParser::parseDA(OperandVector &Operands) {
2102   return parseNamedBit("da", Operands, AMDGPUOperand::ImmTyDA);
2103 }
2104 
2105 AMDGPUAsmParser::OperandMatchResultTy
2106 AMDGPUAsmParser::parseR128(OperandVector &Operands) {
2107   return parseNamedBit("r128", Operands, AMDGPUOperand::ImmTyR128);
2108 }
2109 
2110 AMDGPUAsmParser::OperandMatchResultTy
2111 AMDGPUAsmParser::parseLWE(OperandVector &Operands) {
2112   return parseNamedBit("lwe", Operands, AMDGPUOperand::ImmTyLWE);
2113 }
2114 
2115 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
2116   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDMask);
2117 }
2118 
2119 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
2120   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
2121 }
2122 
2123 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
2124   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDA);
2125 }
2126 
2127 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
2128   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyR128);
2129 }
2130 
2131 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
2132   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyLWE);
2133 }
2134 
2135 //===----------------------------------------------------------------------===//
2136 // smrd
2137 //===----------------------------------------------------------------------===//
2138 
2139 bool AMDGPUOperand::isSMRDOffset() const {
2140 
2141   // FIXME: Support 20-bit offsets on VI.  We need to to pass subtarget
2142   // information here.
2143   return isImm() && isUInt<8>(getImm());
2144 }
2145 
2146 bool AMDGPUOperand::isSMRDLiteralOffset() const {
2147   // 32-bit literals are only supported on CI and we only want to use them
2148   // when the offset is > 8-bits.
2149   return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
2150 }
2151 
2152 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset() const {
2153   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
2154 }
2155 
2156 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
2157   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
2158 }
2159 
2160 //===----------------------------------------------------------------------===//
2161 // vop3
2162 //===----------------------------------------------------------------------===//
2163 
// Map an omod multiplier (1, 2 or 4) onto its encoded value (0, 1 or 2).
// Returns false, leaving Mul untouched, for any other multiplier.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1: Mul = 0; return true;
  case 2: Mul = 1; return true;
  case 4: Mul = 2; return true;
  default: return false;
  }
}
2171 
// Map an omod divisor onto its encoded value: 1 -> 0 (no omod), 2 -> 3
// (divide by two).  Returns false, leaving Div untouched, otherwise.
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1: Div = 0; return true;
  case 2: Div = 3; return true;
  default: return false;
  }
}
2185 
// Map the parsed bound_ctrl value onto its encoding: 0 -> 1, -1 -> 0.
// Returns false, leaving BoundCtrl untouched, for any other value.
static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
  switch (BoundCtrl) {
  case 0:  BoundCtrl = 1; return true;
  case -1: BoundCtrl = 0; return true;
  default: return false;
  }
}
2196 
// Note: the order in this table matches the order of operands in AsmString.
// Each entry is {Name, Type, IsBit, Default, ConvertResult}.  Entries with
// IsBit set are parsed as bare named flags (parseNamedBit); the rest are
// parsed as "name:value" integers (parseIntWithPrefix), with dpp_ctrl and
// omod routed to dedicated parsers in parseOptionalOperand.
static const OptionalOperand AMDGPUOperandTable[] = {
  {"offen",   AMDGPUOperand::ImmTyOffen, true, 0, nullptr},
  {"offset0", AMDGPUOperand::ImmTyOffset0, false, 0, nullptr},
  {"offset1", AMDGPUOperand::ImmTyOffset1, false, 0, nullptr},
  {"gds",     AMDGPUOperand::ImmTyGDS, true, 0, nullptr},
  {"offset",  AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"glc",     AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc",     AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe",     AMDGPUOperand::ImmTyTFE, true, 0, nullptr},
  {"clamp",   AMDGPUOperand::ImmTyClampSI, true, 0, nullptr},
  {"omod",    AMDGPUOperand::ImmTyOModSI, false, 1, ConvertOmodMul},
  {"unorm",   AMDGPUOperand::ImmTyUNorm, true, 0, nullptr},
  {"da",      AMDGPUOperand::ImmTyDA,    true, 0, nullptr},
  {"r128",    AMDGPUOperand::ImmTyR128,  true, 0, nullptr},
  {"lwe",     AMDGPUOperand::ImmTyLWE,   true, 0, nullptr},
  {"dmask",   AMDGPUOperand::ImmTyDMask, false, 0, nullptr},
  {"dpp_ctrl", AMDGPUOperand::ImmTyDppCtrl, false, -1, nullptr},
  {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, 0xf, nullptr},
  {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, 0xf, nullptr},
  {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, -1, ConvertBoundCtrl},
};
2219 
2220 AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands, const OptionalOperand& Op, bool AddDefault)
2221 {
2222   if (Op.IsBit) {
2223     return parseNamedBit(Op.Name, Operands, Op.Type, AddDefault);
2224   } else if (Op.Type == AMDGPUOperand::ImmTyDppCtrl) {
2225     return parseDPPCtrlOps(Operands, AddDefault);
2226   } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
2227     return parseOModOperand(Operands);
2228   } else {
2229     return parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.Default, AddDefault, Op.ConvertResult);
2230   }
2231 }
2232 
AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseAMDGPUOperand(OperandVector &Operands, StringRef Name)
{
  // Parse the optional operand called Name (an empty Name means "any"),
  // consulting AMDGPUOperandTable to recognize optional-operand tokens.
  StringRef Tok;
  if (getLexer().is(AsmToken::Identifier)) {
    Tok = Parser.getTok().getString();
  }
  // Decide whether the current token is itself some known optional
  // operand; "mul"/"div" are the omod spellings, which are not in the
  // table.
  bool optional = false;
  if (Tok == "mul" || Tok == "div") { optional = true; }
  for (const OptionalOperand &Op1 : AMDGPUOperandTable) {
    if (Op1.Name == Tok) { optional = true; break; }
  }
  // Attempt to parse current optional operand.
  for (const OptionalOperand &Op : AMDGPUOperandTable) {
    // TODO: For now, omod is handled separately because
    // token name does not match name in table.
    bool parseThis =
      Name == "" ||
      (Op.Name == Name) ||
      (Name == "omod" && Op.Type == AMDGPUOperand::ImmTyOModSI);
    if (parseThis && Tok == Name) {
      // Exactly the expected token for optional operand.
      // Parse it and add operand normally.
      return parseOptionalOperand(Operands, Op, true);
    } else if (parseThis) {
      // Token for optional operand which is later in the table
      // than the one we expect. If needed, add default value
      // for the operand we expect, do not consume anything
      // and return MatchOperand_NoMatch. Parsing will continue.
      return parseOptionalOperand(Operands, Op, optional);
    } else if (Op.Name == Tok) {
      // This looks like optional operand, but we do not expect it.
      // This is the case when AsmString has token in it.
      return MatchOperand_NoMatch;
    }
  }
  return MatchOperand_NoMatch;
}
2270 
2271 AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands)
2272 {
2273   StringRef Name = Parser.getTok().getString();
2274   if (Name == "mul") {
2275     return parseIntWithPrefix("mul", Operands, AMDGPUOperand::ImmTyOModSI, 0, false, ConvertOmodMul);
2276   } else if (Name == "div") {
2277     return parseIntWithPrefix("div", Operands, AMDGPUOperand::ImmTyOModSI, 0, false, ConvertOmodDiv);
2278   } else {
2279     return MatchOperand_NoMatch;
2280   }
2281 }
2282 
2283 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultClampSI() const {
2284   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyClampSI);
2285 }
2286 
2287 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOModSI() const {
2288   return AMDGPUOperand::CreateImm(1, SMLoc(), AMDGPUOperand::ImmTyOModSI);
2289 }
2290 
2291 void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
2292   unsigned I = 1;
2293   const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2294   for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2295     ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2296   }
2297   for (unsigned E = Operands.size(); I != E; ++I)
2298     ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
2299 }
2300 
2301 void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
2302   uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
2303   if (TSFlags & SIInstrFlags::VOP3) {
2304     cvtVOP3(Inst, Operands);
2305   } else {
2306     cvtId(Inst, Operands);
2307   }
2308 }
2309 
2310 void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
2311   OptionalImmIndexMap OptionalIdx;
2312   unsigned I = 1;
2313   const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2314   for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2315     ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2316   }
2317 
2318   for (unsigned E = Operands.size(); I != E; ++I) {
2319     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2320     if (Op.isRegOrImmWithInputMods()) {
2321       Op.addRegOrImmWithInputModsOperands(Inst, 2);
2322     } else if (Op.isImm()) {
2323       OptionalIdx[Op.getImmTy()] = I;
2324     } else {
2325       assert(false);
2326     }
2327   }
2328 
2329   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
2330   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
2331 }
2332 
2333 void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
2334   unsigned I = 1;
2335   const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2336   for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2337     ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2338   }
2339 
2340   OptionalImmIndexMap OptionalIdx;
2341 
2342   for (unsigned E = Operands.size(); I != E; ++I) {
2343     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2344 
2345     // Add the register arguments
2346     if (Op.isRegOrImm()) {
2347       Op.addRegOrImmOperands(Inst, 1);
2348       continue;
2349     } else if (Op.isImmModifier()) {
2350       OptionalIdx[Op.getImmTy()] = I;
2351     } else {
2352       assert(false);
2353     }
2354   }
2355 
2356   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
2357   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
2358   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2359   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2360   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
2361   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
2362   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
2363   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
2364 }
2365 
2366 void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
2367   unsigned I = 1;
2368   const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2369   for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2370     ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2371   }
2372 
2373   // Add src, same as dst
2374   ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);
2375 
2376   OptionalImmIndexMap OptionalIdx;
2377 
2378   for (unsigned E = Operands.size(); I != E; ++I) {
2379     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2380 
2381     // Add the register arguments
2382     if (Op.isRegOrImm()) {
2383       Op.addRegOrImmOperands(Inst, 1);
2384       continue;
2385     } else if (Op.isImmModifier()) {
2386       OptionalIdx[Op.getImmTy()] = I;
2387     } else {
2388       assert(false);
2389     }
2390   }
2391 
2392   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
2393   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
2394   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2395   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
2396   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
2397   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
2398   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
2399   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2400 }
2401 
2402 //===----------------------------------------------------------------------===//
2403 // dpp
2404 //===----------------------------------------------------------------------===//
2405 
2406 bool AMDGPUOperand::isDPPCtrl() const {
2407   bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
2408   if (result) {
2409     int64_t Imm = getImm();
2410     return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
2411            ((Imm >= 0x101) && (Imm <= 0x10f)) ||
2412            ((Imm >= 0x111) && (Imm <= 0x11f)) ||
2413            ((Imm >= 0x121) && (Imm <= 0x12f)) ||
2414            (Imm == 0x130) ||
2415            (Imm == 0x134) ||
2416            (Imm == 0x138) ||
2417            (Imm == 0x13c) ||
2418            (Imm == 0x140) ||
2419            (Imm == 0x141) ||
2420            (Imm == 0x142) ||
2421            (Imm == 0x143);
2422   }
2423   return false;
2424 }
2425 
2426 AMDGPUAsmParser::OperandMatchResultTy
2427 AMDGPUAsmParser::parseDPPCtrlOps(OperandVector &Operands, bool AddDefault) {
2428   SMLoc S = Parser.getTok().getLoc();
2429   StringRef Prefix;
2430   int64_t Int;
2431 
2432   if (getLexer().getKind() == AsmToken::Identifier) {
2433     Prefix = Parser.getTok().getString();
2434   } else {
2435     return MatchOperand_NoMatch;
2436   }
2437 
2438   if (Prefix == "row_mirror") {
2439     Int = 0x140;
2440   } else if (Prefix == "row_half_mirror") {
2441     Int = 0x141;
2442   } else {
2443     // Check to prevent parseDPPCtrlOps from eating invalid tokens
2444     if (Prefix != "quad_perm"
2445         && Prefix != "row_shl"
2446         && Prefix != "row_shr"
2447         && Prefix != "row_ror"
2448         && Prefix != "wave_shl"
2449         && Prefix != "wave_rol"
2450         && Prefix != "wave_shr"
2451         && Prefix != "wave_ror"
2452         && Prefix != "row_bcast") {
2453       if (AddDefault) {
2454         Operands.push_back(AMDGPUOperand::CreateImm(0, S, AMDGPUOperand::ImmTyDppCtrl));
2455         return MatchOperand_Success;
2456       } else {
2457         return MatchOperand_NoMatch;
2458       }
2459     }
2460 
2461     Parser.Lex();
2462     if (getLexer().isNot(AsmToken::Colon))
2463       return MatchOperand_ParseFail;
2464 
2465     if (Prefix == "quad_perm") {
2466       // quad_perm:[%d,%d,%d,%d]
2467       Parser.Lex();
2468       if (getLexer().isNot(AsmToken::LBrac))
2469         return MatchOperand_ParseFail;
2470 
2471       Parser.Lex();
2472       if (getLexer().isNot(AsmToken::Integer))
2473         return MatchOperand_ParseFail;
2474       Int = getLexer().getTok().getIntVal();
2475 
2476       Parser.Lex();
2477       if (getLexer().isNot(AsmToken::Comma))
2478         return MatchOperand_ParseFail;
2479       Parser.Lex();
2480       if (getLexer().isNot(AsmToken::Integer))
2481         return MatchOperand_ParseFail;
2482       Int += (getLexer().getTok().getIntVal() << 2);
2483 
2484       Parser.Lex();
2485       if (getLexer().isNot(AsmToken::Comma))
2486         return MatchOperand_ParseFail;
2487       Parser.Lex();
2488       if (getLexer().isNot(AsmToken::Integer))
2489         return MatchOperand_ParseFail;
2490       Int += (getLexer().getTok().getIntVal() << 4);
2491 
2492       Parser.Lex();
2493       if (getLexer().isNot(AsmToken::Comma))
2494         return MatchOperand_ParseFail;
2495       Parser.Lex();
2496       if (getLexer().isNot(AsmToken::Integer))
2497         return MatchOperand_ParseFail;
2498       Int += (getLexer().getTok().getIntVal() << 6);
2499 
2500       Parser.Lex();
2501       if (getLexer().isNot(AsmToken::RBrac))
2502         return MatchOperand_ParseFail;
2503 
2504     } else {
2505       // sel:%d
2506       Parser.Lex();
2507       if (getLexer().isNot(AsmToken::Integer))
2508         return MatchOperand_ParseFail;
2509       Int = getLexer().getTok().getIntVal();
2510 
2511       if (Prefix == "row_shl") {
2512         Int |= 0x100;
2513       } else if (Prefix == "row_shr") {
2514         Int |= 0x110;
2515       } else if (Prefix == "row_ror") {
2516         Int |= 0x120;
2517       } else if (Prefix == "wave_shl") {
2518         Int = 0x130;
2519       } else if (Prefix == "wave_rol") {
2520         Int = 0x134;
2521       } else if (Prefix == "wave_shr") {
2522         Int = 0x138;
2523       } else if (Prefix == "wave_ror") {
2524         Int = 0x13C;
2525       } else if (Prefix == "row_bcast") {
2526         if (Int == 15) {
2527           Int = 0x142;
2528         } else if (Int == 31) {
2529           Int = 0x143;
2530         }
2531       } else {
2532         return MatchOperand_ParseFail;
2533       }
2534     }
2535   }
2536   Parser.Lex(); // eat last token
2537 
2538   Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
2539                                               AMDGPUOperand::ImmTyDppCtrl));
2540   return MatchOperand_Success;
2541 }
2542 
2543 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
2544   return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
2545 }
2546 
2547 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
2548   return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
2549 }
2550 
2551 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
2552   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
2553 }
2554 
2555 void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
2556   OptionalImmIndexMap OptionalIdx;
2557 
2558   unsigned I = 1;
2559   const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2560   for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2561     ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2562   }
2563 
2564   for (unsigned E = Operands.size(); I != E; ++I) {
2565     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2566     // Add the register arguments
2567     if (Op.isRegOrImmWithInputMods()) {
2568       // We convert only instructions with modifiers
2569       Op.addRegOrImmWithInputModsOperands(Inst, 2);
2570     } else if (Op.isDPPCtrl()) {
2571       Op.addImmOperands(Inst, 1);
2572     } else if (Op.isImm()) {
2573       // Handle optional arguments
2574       OptionalIdx[Op.getImmTy()] = I;
2575     } else {
2576       llvm_unreachable("Invalid operand type");
2577     }
2578   }
2579 
2580   // ToDo: fix default values for row_mask and bank_mask
2581   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
2582   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
2583   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
2584 }
2585 
2586 //===----------------------------------------------------------------------===//
2587 // sdwa
2588 //===----------------------------------------------------------------------===//
2589 
2590 AMDGPUAsmParser::OperandMatchResultTy
2591 AMDGPUAsmParser::parseSDWASel(OperandVector &Operands) {
2592   SMLoc S = Parser.getTok().getLoc();
2593   StringRef Value;
2594   AMDGPUAsmParser::OperandMatchResultTy res;
2595 
2596   res = parseStringWithPrefix("dst_sel", Value);
2597   if (res == MatchOperand_ParseFail) {
2598     return MatchOperand_ParseFail;
2599   } else if (res == MatchOperand_NoMatch) {
2600     res = parseStringWithPrefix("src0_sel", Value);
2601     if (res == MatchOperand_ParseFail) {
2602       return MatchOperand_ParseFail;
2603     } else if (res == MatchOperand_NoMatch) {
2604       res = parseStringWithPrefix("src1_sel", Value);
2605       if (res != MatchOperand_Success) {
2606         return res;
2607       }
2608     }
2609   }
2610 
2611   int64_t Int;
2612   Int = StringSwitch<int64_t>(Value)
2613         .Case("BYTE_0", 0)
2614         .Case("BYTE_1", 1)
2615         .Case("BYTE_2", 2)
2616         .Case("BYTE_3", 3)
2617         .Case("WORD_0", 4)
2618         .Case("WORD_1", 5)
2619         .Case("DWORD", 6)
2620         .Default(0xffffffff);
2621   Parser.Lex(); // eat last token
2622 
2623   if (Int == 0xffffffff) {
2624     return MatchOperand_ParseFail;
2625   }
2626 
2627   Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
2628                                               AMDGPUOperand::ImmTySdwaSel));
2629   return MatchOperand_Success;
2630 }
2631 
2632 AMDGPUAsmParser::OperandMatchResultTy
2633 AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
2634   SMLoc S = Parser.getTok().getLoc();
2635   StringRef Value;
2636   AMDGPUAsmParser::OperandMatchResultTy res;
2637 
2638   res = parseStringWithPrefix("dst_unused", Value);
2639   if (res != MatchOperand_Success) {
2640     return res;
2641   }
2642 
2643   int64_t Int;
2644   Int = StringSwitch<int64_t>(Value)
2645         .Case("UNUSED_PAD", 0)
2646         .Case("UNUSED_SEXT", 1)
2647         .Case("UNUSED_PRESERVE", 2)
2648         .Default(0xffffffff);
2649   Parser.Lex(); // eat last token
2650 
2651   if (Int == 0xffffffff) {
2652     return MatchOperand_ParseFail;
2653   }
2654 
2655   Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
2656                                               AMDGPUOperand::ImmTySdwaDstUnused));
2657   return MatchOperand_Success;
2658 }
2659 
2660 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSDWASel() const {
2661   return AMDGPUOperand::CreateImm(6, SMLoc(), AMDGPUOperand::ImmTySdwaSel);
2662 }
2663 
2664 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSDWADstUnused() const {
2665   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySdwaDstUnused);
2666 }
2667 
2668 
/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  // Register this asm parser with both AMDGPU targets (R600 and GCN).
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}
2674 
2675 #define GET_REGISTER_MATCHER
2676 #define GET_MATCHER_IMPLEMENTATION
2677 #include "AMDGPUGenAsmMatcher.inc"
2678